diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidercniconfigs.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercniconfigs.yaml new file mode 100644 index 0000000000..cae1d17ff5 --- /dev/null +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercniconfigs.yaml @@ -0,0 +1,497 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (unknown) + name: spidercniconfigs.spiderpool.spidernet.io +spec: + group: spiderpool.spidernet.io + names: + categories: + - spiderpool + kind: SpiderCNIConfig + listKind: SpiderCNIConfigList + plural: spidercniconfigs + shortNames: + - scc + singular: spidercniconfig + scope: Cluster + versions: + - name: v2beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec is the specification of the CNI configuration + properties: + chainCNIJsonData: + description: |- + ChainCNIJsonData is used to configure the configuration of chain CNI. + format in json. 
+ items: + type: string + type: array + cniType: + default: custom + enum: + - macvlan + - ipvlan + - sriov + - ovs + - ib-sriov + - ipoib + - custom + type: string + coordinator: + description: CoordinationSpec defines the desired state of SpiderCoordinator. + properties: + hijackCIDR: + description: |- + HijackCIDR configure static routing tables in the pod that target these + subnets to ensure that when the pod accesses these subnets, packets + are forwarded through the host network stack, such as nodelocaldns(169.254.0.0/16) + items: + type: string + type: array + hostRuleTable: + default: 500 + description: |- + HostRuleTable specifies the table number of the routing table used + to configure the communication between the pod and the local node. + type: integer + mode: + default: auto + description: |- + Mode mode specifies the mode in which the coordinator runs, + and the configurable values include auto (default), underlay, + overlay, disabled. + enum: + - auto + - underlay + - overlay + - disabled + type: string + podCIDRType: + description: |- + CoordinatorSpec is used by SpiderCoordinator and SpiderMultusConfig + in spidermultusconfig CRD , podCIDRType should not be required, which + could be merged from SpiderCoordinator CR + but in SpiderCoordinator CRD, podCIDRType should be required + enum: + - auto + - cluster + - calico + - cilium + - none + type: string + podDefaultRouteNIC: + description: |- + PodDefaultRouteNIC PodDefaultRouteNIC is used to configure the NIC where + the pod's default route resides. the default value is empty, which means + the default route will remain at eth0. + type: string + podMACPrefix: + description: |- + PodMACPrefix the fixed MAC address prefix, the length is two bytes. + the lowest bit of the first byte must be 0, which indicates the + unicast MAC address. example: 0a:1b + type: string + podRPFilter: + default: 0 + description: |- + PodRPFilter is used for coordiantor to help set the rp_filter parameters of the pod. 
+ Configurable values: /0/1/2. negative number means leave it as it is. + the default value is 0. + type: integer + tunePodRoutes: + default: true + description: TunePodRoutes specifies whether to tune pod routes + of multiple NICs on pods. + type: boolean + txQueueLen: + default: 0 + description: |- + TxQueueLen to set the tx_queue_len of the pod. requirement is a positive integer + the default value is 0, which means leaving it as it is. + type: integer + vethLinkAddress: + description: |- + VethLinkAddress configure a ipv4 link-local address + for veth0 device. empty means disable. default is empty. + Format is like 169.254.100.1 + type: string + type: object + customCNI: + description: OtherCniTypeConfig only used for CniType custom, valid + json format, can be empty + type: string + disableIPAM: + default: false + type: boolean + enableCoordinator: + default: true + description: if CniType was set to custom, we'll mutate this field + to be false + type: boolean + ibsriov: + properties: + enableIbKubernetes: + default: false + description: Enforces ib-sriov-cni to work with ib-kubernetes. + type: boolean + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. + properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + linkState: + default: enable + description: 'Enforces link state for the VF. Allowed values: + auto, enable, disable.' + enum: + - auto + - enable + - disable + type: string + pkey: + description: |- + infiniBand pkey for VF, this field is used by ib-kubernetes to add pkey with + guid to InfiniBand subnet manager client e.g. 
Mellanox UFM, OpenSM + type: string + rdmaIsolation: + default: true + description: |- + rdmaIsolation enablw RDMA CNI plugin is intended to be run as a chained CNI plugin. + it ensures isolation of RDMA traffic from other workloads in the system by moving + the associated RDMA interfaces of the provided network interface to the container's + network namespace path. + type: boolean + resourceName: + description: |- + The SR-IOV RDMA resource name of the SpiderMultusConfig. the SR-IOV RDMA resource is often + reported to kubelet by the sriov-device-plugin. + type: string + required: + - resourceName + type: object + ipoib: + properties: + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. + properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + master: + description: name of the host interface to create the link from. + type: string + required: + - master + type: object + ipvlan: + properties: + bond: + description: Optional bond configuration for the CNI. It must + not be nil if the multiple master interfaces are specified. + properties: + mode: + format: int32 + maximum: 6 + minimum: 0 + type: integer + name: + type: string + options: + type: string + required: + - mode + - name + type: object + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. 
+ properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + master: + description: |- + The master interface(s) for the CNI configuration. At least one master interface must be specified. + If multiple master interfaces are specified, the spiderpool will create a bond device with the bondConfig + by the ifacer plugin. + items: + type: string + type: array + mtu: + default: 0 + description: explicitly set MTU to the specified value. Defaults('0' + or no value provided) to the value chosen by the kernel. + format: int32 + minimum: 0 + type: integer + rdmaResourceName: + description: |- + The RDMA resource name of the nic. the RDMA resource is often reported to kubelet by the + k8s-rdma-shared-dev-plugin. when it is not empty and spiderpool podResourceInject feature + is enabled, spiderpool can automatically inject it into the container's resources via webhook. + type: string + vlanID: + description: 'The VLAN ID for the CNI configuration, optional + and must be within the specified range: [0.4096).' + format: int32 + maximum: 4094 + minimum: 0 + type: integer + required: + - master + type: object + macvlan: + properties: + bond: + description: Optional bond configuration for the CNI. It must + not be nil if the multiple master interfaces are specified. + properties: + mode: + format: int32 + maximum: 6 + minimum: 0 + type: integer + name: + type: string + options: + type: string + required: + - mode + - name + type: object + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. 
+ properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + master: + description: |- + The master interface(s) for the CNI configuration. At least one master interface must be specified. + If multiple master interfaces are specified, the spiderpool will create a bond device with the bondConfig + by the ifacer plugin. + items: + type: string + type: array + mtu: + default: 0 + description: explicitly set MTU to the specified value. Defaults('0' + or no value provided) to the value chosen by the kernel. + format: int32 + minimum: 0 + type: integer + rdmaResourceName: + description: |- + The RDMA resource name of the nic. the RDMA resource is often reported to kubelet by the + k8s-rdma-shared-dev-plugin. when it is not empty and spiderpool podResourceInject feature + is enabled, spiderpool can automatically inject it into the container's resources via webhook. + type: string + vlanID: + description: 'The VLAN ID for the CNI configuration, optional + and must be within the specified range: [0.4096).' + format: int32 + maximum: 4094 + minimum: 0 + type: integer + required: + - master + type: object + ovs: + properties: + bridge: + type: string + deviceID: + description: PCI address of a VF in valid sysfs format + type: string + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. 
+ properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + trunk: + items: + properties: + id: + maximum: 4094 + minimum: 0 + type: integer + maxID: + maximum: 4094 + minimum: 0 + type: integer + minID: + maximum: 4094 + minimum: 0 + type: integer + type: object + type: array + vlan: + format: int32 + type: integer + required: + - bridge + type: object + sriov: + properties: + ippools: + description: SpiderpoolPools could specify the IPAM spiderpool + CNI configuration default IPv4&IPv6 pools. + properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + matchMasterSubnet: + default: false + description: enable IPAM to check if the IPPools of the pod + if matched the master subnet + enum: + - true + - false + type: boolean + type: object + maxTxRateMbps: + description: Mbps, 0 = disable rate limiting + minimum: 0 + type: integer + minTxRateMbps: + minimum: 0 + type: integer + mtu: + default: 0 + description: explicitly set MTU to the specified value via tuning + plugin. Defaults('0' or no value provided) to the value chosen + by the kernel. + format: int32 + minimum: 0 + type: integer + rdmaIsolation: + default: false + description: |- + rdmaIsolation enable RDMA CNI plugin is intended to be run as a chained CNI plugin. + it ensures isolation of RDMA traffic from other workloads in the system by moving + the associated RDMA interfaces of the provided network interface to the container's + network namespace path. + type: boolean + resourceName: + description: |- + The SR-IOV RDMA resource name of the SpiderMultusConfig. the SR-IOV RDMA resource is often + reported to kubelet by the sriov-device-plugin. 
+ type: string + vlanID: + description: 'The VLAN ID for the CNI configuration, optional + and must be within the specified range: [0.4096).' + format: int32 + maximum: 4094 + minimum: 0 + type: integer + required: + - resourceName + type: object + type: object + type: object + served: true + storage: true diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml index 2a68477414..2d23e5f257 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml @@ -24,14 +24,19 @@ spec: description: SpiderCoordinator is the Schema for the spidercoordinators API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -39,23 +44,24 @@ spec: description: CoordinationSpec defines the desired state of SpiderCoordinator. properties: hijackCIDR: - description: HijackCIDR configure static routing tables in the pod - that target these subnets to ensure that when the pod accesses these - subnets, packets are forwarded through the host network stack, such - as nodelocaldns(169.254.0.0/16) + description: |- + HijackCIDR configure static routing tables in the pod that target these + subnets to ensure that when the pod accesses these subnets, packets + are forwarded through the host network stack, such as nodelocaldns(169.254.0.0/16) items: type: string type: array hostRuleTable: default: 500 - description: HostRuleTable specifies the table number of the routing - table used to configure the communication between the pod and the - local node. + description: |- + HostRuleTable specifies the table number of the routing table used + to configure the communication between the pod and the local node. type: integer mode: default: auto - description: Mode mode specifies the mode in which the coordinator - runs, and the configurable values include auto (default), underlay, + description: |- + Mode mode specifies the mode in which the coordinator runs, + and the configurable values include auto (default), underlay, overlay, disabled. 
enum: - auto @@ -64,10 +70,11 @@ spec: - disabled type: string podCIDRType: - description: CoordinatorSpec is used by SpiderCoordinator and SpiderMultusConfig - in spidermultusconfig CRD , podCIDRType should not be required, - which could be merged from SpiderCoordinator CR but in SpiderCoordinator - CRD, podCIDRType should be required + description: |- + CoordinatorSpec is used by SpiderCoordinator and SpiderMultusConfig + in spidermultusconfig CRD , podCIDRType should not be required, which + could be merged from SpiderCoordinator CR + but in SpiderCoordinator CRD, podCIDRType should be required enum: - auto - cluster @@ -76,21 +83,23 @@ spec: - none type: string podDefaultRouteNIC: - description: PodDefaultRouteNIC PodDefaultRouteNIC is used to configure - the NIC where the pod's default route resides. the default value - is empty, which means the default route will remain at eth0. + description: |- + PodDefaultRouteNIC PodDefaultRouteNIC is used to configure the NIC where + the pod's default route resides. the default value is empty, which means + the default route will remain at eth0. type: string podMACPrefix: - description: 'PodMACPrefix the fixed MAC address prefix, the length - is two bytes. the lowest bit of the first byte must be 0, which - indicates the unicast MAC address. example: 0a:1b' + description: |- + PodMACPrefix the fixed MAC address prefix, the length is two bytes. + the lowest bit of the first byte must be 0, which indicates the + unicast MAC address. example: 0a:1b type: string podRPFilter: default: 0 - description: 'PodRPFilter is used for coordiantor to help set the - rp_filter parameters of the pod. Configurable values: /0/1/2. negative number means leave it as it is. the default - value is 0.' + description: |- + PodRPFilter is used for coordiantor to help set the rp_filter parameters of the pod. + Configurable values: /0/1/2. negative number means leave it as it is. + the default value is 0. 
type: integer tunePodRoutes: default: true @@ -99,14 +108,15 @@ spec: type: boolean txQueueLen: default: 0 - description: TxQueueLen to set the tx_queue_len of the pod. requirement - is a positive integer the default value is 0, which means leaving - it as it is. + description: |- + TxQueueLen to set the tx_queue_len of the pod. requirement is a positive integer + the default value is 0, which means leaving it as it is. type: integer vethLinkAddress: - description: VethLinkAddress configure a ipv4 link-local address for - veth0 device. empty means disable. default is empty. Format is like - 169.254.100.1 + description: |- + VethLinkAddress configure a ipv4 link-local address + for veth0 device. empty means disable. default is empty. + Format is like 169.254.100.1 type: string type: object status: diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderendpoints.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderendpoints.yaml index 28ba9b1a33..92dca69190 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderendpoints.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderendpoints.yaml @@ -49,14 +49,19 @@ spec: description: Spiderndpoint is the Schema for the spiderendpoints API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderippools.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderippools.yaml index cd0f40aeb5..2d37b616c8 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderippools.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderippools.yaml @@ -65,14 +65,19 @@ spec: description: SpiderIPPool is the Schema for the spiderippools API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -106,33 +111,33 @@ spec: type: string type: array namespaceAffinity: - description: A label selector is a label query over a set of resources. - The result of matchLabels and matchExpressions are ANDed. An empty - label selector matches all objects. A null label selector matches - no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. 
If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -147,11 +152,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -160,33 +164,33 @@ spec: type: string type: array nodeAffinity: - description: A label selector is a label query over a set of resources. - The result of matchLabels and matchExpressions are ANDed. An empty - label selector matches all objects. A null label selector matches - no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -201,11 +205,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -214,33 +217,33 @@ spec: type: string type: array podAffinity: - description: A label selector is a label query over a set of resources. - The result of matchLabels and matchExpressions are ANDed. 
An empty - label selector matches all objects. A null label selector matches - no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -255,11 +258,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". 
The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml index 1772b9d4f6..8b50097a58 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml @@ -23,14 +23,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,8 +43,9 @@ spec: description: Spec is the specification of the MultusCNIConfig properties: chainCNIJsonData: - description: ChainCNIJsonData is used to configure the configuration - of chain CNI. format in json. + description: |- + ChainCNIJsonData is used to configure the configuration of chain CNI. + format in json. items: type: string type: array @@ -58,23 +64,24 @@ spec: description: CoordinationSpec defines the desired state of SpiderCoordinator. properties: hijackCIDR: - description: HijackCIDR configure static routing tables in the - pod that target these subnets to ensure that when the pod accesses - these subnets, packets are forwarded through the host network - stack, such as nodelocaldns(169.254.0.0/16) + description: |- + HijackCIDR configure static routing tables in the pod that target these + subnets to ensure that when the pod accesses these subnets, packets + are forwarded through the host network stack, such as nodelocaldns(169.254.0.0/16) items: type: string type: array hostRuleTable: default: 500 - description: HostRuleTable specifies the table number of the routing - table used to configure the communication between the pod and - the local node. + description: |- + HostRuleTable specifies the table number of the routing table used + to configure the communication between the pod and the local node. type: integer mode: default: auto - description: Mode mode specifies the mode in which the coordinator - runs, and the configurable values include auto (default), underlay, + description: |- + Mode mode specifies the mode in which the coordinator runs, + and the configurable values include auto (default), underlay, overlay, disabled. 
enum: - auto @@ -83,10 +90,11 @@ - disabled type: string podCIDRType: - description: CoordinatorSpec is used by SpiderCoordinator and - SpiderMultusConfig in spidermultusconfig CRD , podCIDRType should - not be required, which could be merged from SpiderCoordinator - CR but in SpiderCoordinator CRD, podCIDRType should be required + description: |- + CoordinatorSpec is used by SpiderCoordinator and SpiderMultusConfig + in spidermultusconfig CRD , podCIDRType should not be required, which + could be merged from SpiderCoordinator CR + but in SpiderCoordinator CRD, podCIDRType should be required enum: - auto - cluster @@ -95,22 +103,23 @@ - none type: string podDefaultRouteNIC: - description: PodDefaultRouteNIC PodDefaultRouteNIC is used to - configure the NIC where the pod's default route resides. the - default value is empty, which means the default route will remain - at eth0. + description: |- + PodDefaultRouteNIC PodDefaultRouteNIC is used to configure the NIC where + the pod's default route resides. the default value is empty, which means + the default route will remain at eth0. type: string podMACPrefix: - description: 'PodMACPrefix the fixed MAC address prefix, the length - is two bytes. the lowest bit of the first byte must be 0, which - indicates the unicast MAC address. example: 0a:1b' + description: |- + PodMACPrefix the fixed MAC address prefix, the length is two bytes. + the lowest bit of the first byte must be 0, which indicates the + unicast MAC address. example: 0a:1b type: string podRPFilter: default: 0 - description: 'PodRPFilter is used for coordiantor to help set - the rp_filter parameters of the pod. Configurable values: /0/1/2. negative number means leave it as it is. the - default value is 0.' + description: |- + PodRPFilter is used for coordinator to help set the rp_filter parameters of the pod. + Configurable values: /0/1/2. negative number means leave it as it is. + the default value is 0. 
type: integer tunePodRoutes: default: true @@ -119,14 +128,15 @@ type: boolean txQueueLen: default: 0 - description: TxQueueLen to set the tx_queue_len of the pod. requirement - is a positive integer the default value is 0, which means leaving - it as it is. + description: |- + TxQueueLen to set the tx_queue_len of the pod. requirement is a positive integer + the default value is 0, which means leaving it as it is. type: integer vethLinkAddress: - description: VethLinkAddress configure a ipv4 link-local address - for veth0 device. empty means disable. default is empty. Format - is like 169.254.100.1 + description: |- + VethLinkAddress configure a ipv4 link-local address + for veth0 device. empty means disable. default is empty. + Format is like 169.254.100.1 type: string type: object customCNI: @@ -178,23 +188,25 @@ spec: - disable type: string pkey: - description: infiniBand pkey for VF, this field is used by ib-kubernetes - to add pkey with guid to InfiniBand subnet manager client e.g. - Mellanox UFM, OpenSM + description: |- + infiniBand pkey for VF, this field is used by ib-kubernetes to add pkey with + guid to InfiniBand subnet manager client e.g. Mellanox UFM, OpenSM type: string rdmaIsolation: default: true - description: rdmaIsolation enablw RDMA CNI plugin is intended - to be run as a chained CNI plugin. it ensures isolation of RDMA - traffic from other workloads in the system by moving the associated - RDMA interfaces of the provided network interface to the container's + description: |- + rdmaIsolation enable RDMA CNI plugin is intended to be run as a chained CNI plugin. + it ensures isolation of RDMA traffic from other workloads in the system by moving + the associated RDMA interfaces of the provided network interface to the container's network namespace path. type: boolean resourceName: - description: The SR-IOV RDMA resource name of the SpiderMultusConfig. - the SR-IOV RDMA resource is often reported to kubelet by the - sriov-device-plugin. 
+ description: |- + The SR-IOV RDMA resource name of the SpiderMultusConfig. the SR-IOV RDMA resource is often + reported to kubelet by the sriov-device-plugin. type: string + required: + - resourceName type: object ipoib: properties: @@ -222,6 +234,8 @@ spec: master: description: name of the host interface to create the link from. type: string + required: + - master type: object ipvlan: properties: @@ -264,10 +278,10 @@ spec: type: boolean type: object master: - description: The master interface(s) for the CNI configuration. - At least one master interface must be specified. If multiple - master interfaces are specified, the spiderpool will create - a bond device with the bondConfig by the ifacer plugin. + description: |- + The master interface(s) for the CNI configuration. At least one master interface must be specified. + If multiple master interfaces are specified, the spiderpool will create a bond device with the bondConfig + by the ifacer plugin. items: type: string type: array @@ -279,11 +293,10 @@ spec: minimum: 0 type: integer rdmaResourceName: - description: The RDMA resource name of the nic. the RDMA resource - is often reported to kubelet by the k8s-rdma-shared-dev-plugin. - when it is not empty and spiderpool podResourceInject feature - is enabled, spiderpool can automatically inject it into the - container's resources via webhook. + description: |- + The RDMA resource name of the nic. the RDMA resource is often reported to kubelet by the + k8s-rdma-shared-dev-plugin. when it is not empty and spiderpool podResourceInject feature + is enabled, spiderpool can automatically inject it into the container's resources via webhook. type: string vlanID: description: 'The VLAN ID for the CNI configuration, optional @@ -336,10 +349,10 @@ spec: type: boolean type: object master: - description: The master interface(s) for the CNI configuration. - At least one master interface must be specified. 
If multiple - master interfaces are specified, the spiderpool will create - a bond device with the bondConfig by the ifacer plugin. + description: |- + The master interface(s) for the CNI configuration. At least one master interface must be specified. + If multiple master interfaces are specified, the spiderpool will create a bond device with the bondConfig + by the ifacer plugin. items: type: string type: array @@ -351,11 +364,10 @@ spec: minimum: 0 type: integer rdmaResourceName: - description: The RDMA resource name of the nic. the RDMA resource - is often reported to kubelet by the k8s-rdma-shared-dev-plugin. - when it is not empty and spiderpool podResourceInject feature - is enabled, spiderpool can automatically inject it into the - container's resources via webhook. + description: |- + The RDMA resource name of the nic. the RDMA resource is often reported to kubelet by the + k8s-rdma-shared-dev-plugin. when it is not empty and spiderpool podResourceInject feature + is enabled, spiderpool can automatically inject it into the container's resources via webhook. type: string vlanID: description: 'The VLAN ID for the CNI configuration, optional @@ -458,16 +470,16 @@ spec: type: integer rdmaIsolation: default: false - description: rdmaIsolation enable RDMA CNI plugin is intended - to be run as a chained CNI plugin. it ensures isolation of RDMA - traffic from other workloads in the system by moving the associated - RDMA interfaces of the provided network interface to the container's + description: |- + rdmaIsolation enable RDMA CNI plugin is intended to be run as a chained CNI plugin. + it ensures isolation of RDMA traffic from other workloads in the system by moving + the associated RDMA interfaces of the provided network interface to the container's network namespace path. type: boolean resourceName: - description: The SR-IOV RDMA resource name of the SpiderMultusConfig. - the SR-IOV RDMA resource is often reported to kubelet by the - sriov-device-plugin. 
+ description: |- + The SR-IOV RDMA resource name of the SpiderMultusConfig. the SR-IOV RDMA resource is often + reported to kubelet by the sriov-device-plugin. type: string vlanID: description: 'The VLAN ID for the CNI configuration, optional @@ -476,6 +488,8 @@ spec: maximum: 4094 minimum: 0 type: integer + required: + - resourceName type: object type: object type: object diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderreservedips.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderreservedips.yaml index 01ef3d08ec..76458f929f 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spiderreservedips.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spiderreservedips.yaml @@ -29,14 +29,19 @@ spec: description: SpiderReservedIP is the Schema for the spiderreservedips API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidersubnets.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidersubnets.yaml index 3e324639dd..5301a6bf3e 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spidersubnets.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidersubnets.yaml @@ -41,14 +41,19 @@ spec: description: SpiderSubnet is the Schema for the spidersubnets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/charts/spiderpool/templates/configmap.yaml b/charts/spiderpool/templates/configmap.yaml index d208ef1ee8..879e787ab3 100644 --- a/charts/spiderpool/templates/configmap.yaml +++ b/charts/spiderpool/templates/configmap.yaml @@ -32,10 +32,16 @@ data: clusterSubnetDefaultFlexibleIPNumber: 0 {{- end }} tuneSysctlConfig: {{ .Values.spiderpoolAgent.tuneSysctlConfig }} - podResourceInject: - enabled: {{ .Values.spiderpoolController.podResourceInject.enabled }} - namespacesExclude: {{ toJson .Values.spiderpoolController.podResourceInject.namespacesExclude }} - namespacesInclude: {{ toJson .Values.spiderpoolController.podResourceInject.namespacesInclude }} + podResourceWebhookInjected: + enabled: {{ .Values.spiderpoolController.podResourceWebhookInjected.enabled }} + enabledDRAWebhook: {{ .Values.spiderpoolController.podResourceWebhookInjected.enabledDRAWebhook }} + namespacesExclude: {{ toJson .Values.spiderpoolController.podResourceWebhookInjected.namespacesExclude }} + namespacesInclude: {{ toJson .Values.spiderpoolController.podResourceWebhookInjected.namespacesInclude }} + dra: + enabled: {{ .Values.spiderpoolAgent.dra.enabled }} + cdiDir: {{ .Values.spiderpoolAgent.dra.cdiDir }} + enabledNRI: {{ .Values.spiderpoolAgent.dra.enabledNRI }} + {{- if .Values.multus.multusCNI.install }} --- kind: ConfigMap diff --git a/charts/spiderpool/templates/daemonset.yaml b/charts/spiderpool/templates/daemonset.yaml index 9cfa3ca6e5..e132070841 100644 --- a/charts/spiderpool/templates/daemonset.yaml +++ b/charts/spiderpool/templates/daemonset.yaml @@ -236,6 +236,14 @@ spec: - name: host-ns mountPath: /var/run mountPropagation: Bidirectional + {{- end }} + - name: cdi + mountPath: {{ .Values.spiderpoolAgent.dra.cdiDir }} + - name: kubelet-rootdir + mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + - name: nri-plugin 
+ mountPath: /var/run/nri - name: config-path mountPath: /tmp/spiderpool/config-map readOnly: true @@ -333,7 +341,16 @@ spec: items: - key: entrypoint.sh path: entrypoint.sh - {{- end }} + {{- end }} + - name: kubelet-rootdir + hostPath: + path: /var/lib/kubelet/ + - name: cdi + hostPath: + path: /var/run/cdi + - name: nri-plugin + hostPath: + path: /var/run/nri {{- if .Values.spiderpoolAgent.extraVolumeMounts }} {{- include "tplvalues.render" ( dict "value" .Values.spiderpoolAgent.extraVolumeMounts "context" $ ) | nindent 6 }} {{- end }} diff --git a/charts/spiderpool/templates/deployment.yaml b/charts/spiderpool/templates/deployment.yaml index 8c8c92fa91..11a7d28798 100644 --- a/charts/spiderpool/templates/deployment.yaml +++ b/charts/spiderpool/templates/deployment.yaml @@ -173,6 +173,8 @@ spec: value: {{ .Values.ipam.gc.gcAll.intervalInSecond | quote }} - name: SPIDERPOOL_MULTUS_CONFIG_ENABLED value: {{ .Values.multus.enableMultusConfig | quote }} + - name: SPIDERPOOL_SPIDERCNI_CONFIG_ENABLED + value: {{ .Values.multus.enableSpiderCNIConfig | quote }} - name: SPIDERPOOL_CNI_CONFIG_DIR value: {{ .Values.global.cniConfHostPath | quote }} - name: SPIDERPOOL_COORDINATOR_ENABLED diff --git a/charts/spiderpool/templates/deviceclass.yaml b/charts/spiderpool/templates/deviceclass.yaml new file mode 100644 index 0000000000..61d3795916 --- /dev/null +++ b/charts/spiderpool/templates/deviceclass.yaml @@ -0,0 +1,10 @@ +{{- if .Values.spiderpoolAgent.dra.enabled}} +apiVersion: resource.k8s.io/v1beta1 +kind: DeviceClass +metadata: + name: cni.spidernet.io +spec: + selectors: + - cel: + expression: device.driver == 'dra.spidernet.io' +{{- end }} \ No newline at end of file diff --git a/charts/spiderpool/templates/role.yaml b/charts/spiderpool/templates/role.yaml index 031b9240a8..fb0fac4590 100644 --- a/charts/spiderpool/templates/role.yaml +++ b/charts/spiderpool/templates/role.yaml @@ -150,26 +150,17 @@ rules: - apiGroups: - resource.k8s.io resources: - - 
podschedulingcontexts - - podschedulingcontexts/status + - deviceclasses - resourceclaims - - resourceclaims/status - resourceclaimtemplates - - resourceclasses verbs: - get - list - - patch - - update - watch - apiGroups: - - spiderpool.spidernet.io + - resource.k8s.io resources: - - spiderclaimparameters - - spiderendpoints - - spidermultusconfigs - - spiderreservedips - - spidersubnets + - resourceslices verbs: - create - delete @@ -181,7 +172,13 @@ rules: - apiGroups: - spiderpool.spidernet.io resources: + - spiderclaimparameters + - spidercniconfigs - spidercoordinators + - spiderendpoints + - spidermultusconfigs + - spiderreservedips + - spidersubnets verbs: - create - delete diff --git a/charts/spiderpool/templates/tls.yaml b/charts/spiderpool/templates/tls.yaml index 19ce522fe9..89e77ee572 100644 --- a/charts/spiderpool/templates/tls.yaml +++ b/charts/spiderpool/templates/tls.yaml @@ -144,7 +144,7 @@ webhooks: - spidercoordinators sideEffects: None {{- end }} -{{- if .Values.spiderpoolController.podResourceInject.enabled }} +{{- if .Values.spiderpoolController.podResourceWebhookInjected.enabled }} - admissionReviewVersions: - v1 clientConfig: @@ -160,18 +160,18 @@ webhooks: {{- end }} failurePolicy: Fail name: pods.spiderpool.spidernet.io - {{- if or .Values.spiderpoolController.podResourceInject.namespacesExclude .Values.spiderpoolController.podResourceInject.namespacesInclude }} + {{- if or .Values.spiderpoolController.podResourceWebhookInjected.namespacesExclude .Values.spiderpoolController.podResourceWebhookInjected.namespacesInclude }} namespaceSelector: matchExpressions: - {{- if .Values.spiderpoolController.podResourceInject.namespacesExclude }} + {{- if .Values.spiderpoolController.podResourceWebhookInjected.namespacesExclude }} - key: kubernetes.io/metadata.name operator: NotIn - values: {{ toYaml .Values.spiderpoolController.podResourceInject.namespacesExclude | nindent 8 }} + values: {{ toYaml 
.Values.spiderpoolController.podResourceWebhookInjected.namespacesExclude | nindent 8 }} {{- end }} - {{- if .Values.spiderpoolController.podResourceInject.namespacesInclude }} + {{- if .Values.spiderpoolController.podResourceWebhookInjected.namespacesInclude }} - key: kubernetes.io/metadata.name operator: In - values: {{ toYaml .Values.spiderpoolController.podResourceInject.namespacesInclude | nindent 8 }} + values: {{ toYaml .Values.spiderpoolController.podResourceWebhookInjected.namespacesInclude | nindent 8 }} {{- end }} {{- end }} rules: diff --git a/charts/spiderpool/values.yaml b/charts/spiderpool/values.yaml index 0b7111f492..81b344674e 100644 --- a/charts/spiderpool/values.yaml +++ b/charts/spiderpool/values.yaml @@ -221,6 +221,9 @@ multus: ## @param multus.enableMultusConfig enable SpiderMultusConfig enableMultusConfig: true + ## @param multus.enableSpiderCNIConfig enable cluster-scoped SpiderCNIConfig controller + enableSpiderCNIConfig: false + multusCNI: ## @param multus.multusCNI.install enable install multus-CNI install: true @@ -386,6 +389,12 @@ spiderpoolAgent: imagePullSecrets: [] # - name: "image-pull-secret" + ## + dra: + enabled: false + cdiDir: "/var/run/cdi" + enabledNRI: false + ## @skip spiderpoolAgent.nodeSelector.kubernetes.io/os nodeSelector: kubernetes.io/os: linux @@ -679,10 +688,13 @@ spiderpoolController: ## @param spiderpoolController.enableValidatingResourcesDeletedWebhook enable validating resources deleted webhook for spiderpoolController enableValidatingResourcesDeletedWebhook: false - podResourceInject: - ## @param spiderpoolController.podResourceInject.enabled enable pod resource inject + podResourceWebhookInjected: + ## @param spiderpoolController.podResourceWebhookInjected.enabled enable pod resource inject via webhook enabled: false + ## @param spiderpoolController.podResourceWebhookInjected.enabledDRAWebhook enable pod resource inject via webhook for DRA + enabledDRAWebhook: false + ## @param 
spiderpoolController.podResourceInject.namespacesExclude exclude the namespaces of the pod resource inject namespacesExclude: - kube-system diff --git a/cmd/coordinator/cmd/utils.go b/cmd/coordinator/cmd/utils.go index 461e4ad26f..753538e742 100644 --- a/cmd/coordinator/cmd/utils.go +++ b/cmd/coordinator/cmd/utils.go @@ -16,7 +16,6 @@ import ( "go.uber.org/zap" "golang.org/x/sys/unix" utiliptables "k8s.io/kubernetes/pkg/util/iptables" - "k8s.io/utils/exec" "github.com/spidernet-io/spiderpool/pkg/networking/networking" ) @@ -544,18 +543,17 @@ func (c *coordinator) tunePodRoutes(logger *zap.Logger, configDefaultRouteNIC st func (c *coordinator) makeReplyPacketViaVeth(logger *zap.Logger) error { var iptablesInterface []utiliptables.Interface var ipFamily []int - execer := exec.New() markInt := getMarkInt(defaultMarkBit) switch c.ipFamily { case netlink.FAMILY_V4: - iptablesInterface = append(iptablesInterface, utiliptables.New(execer, utiliptables.ProtocolIPv4)) + iptablesInterface = append(iptablesInterface, utiliptables.New(utiliptables.ProtocolIPv4)) ipFamily = append(ipFamily, netlink.FAMILY_V4) case netlink.FAMILY_V6: - iptablesInterface = append(iptablesInterface, utiliptables.New(execer, utiliptables.ProtocolIPv6)) + iptablesInterface = append(iptablesInterface, utiliptables.New(utiliptables.ProtocolIPv6)) ipFamily = append(ipFamily, netlink.FAMILY_V6) case netlink.FAMILY_ALL: - iptablesInterface = append(iptablesInterface, utiliptables.New(execer, utiliptables.ProtocolIPv4)) - iptablesInterface = append(iptablesInterface, utiliptables.New(execer, utiliptables.ProtocolIPv6)) + iptablesInterface = append(iptablesInterface, utiliptables.New(utiliptables.ProtocolIPv4)) + iptablesInterface = append(iptablesInterface, utiliptables.New(utiliptables.ProtocolIPv6)) ipFamily = append(ipFamily, netlink.FAMILY_V4) ipFamily = append(ipFamily, netlink.FAMILY_V6) } diff --git a/cmd/spiderpool-agent/cmd/config.go b/cmd/spiderpool-agent/cmd/config.go index 
34af079531..868e31edcc 100644 --- a/cmd/spiderpool-agent/cmd/config.go +++ b/cmd/spiderpool-agent/cmd/config.go @@ -20,6 +20,7 @@ import ( "github.com/spidernet-io/spiderpool/api/v1/agent/client" "github.com/spidernet-io/spiderpool/api/v1/agent/server" "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/dra" "github.com/spidernet-io/spiderpool/pkg/ipam" "github.com/spidernet-io/spiderpool/pkg/ippoolmanager" "github.com/spidernet-io/spiderpool/pkg/kubevirtmanager" @@ -135,6 +136,9 @@ type AgentContext struct { // client unixClient *client.SpiderpoolAgentAPI + // dra + draDriver *dra.Driver + // probe IsStartupProbe atomic.Bool } diff --git a/cmd/spiderpool-agent/cmd/crd_manager.go b/cmd/spiderpool-agent/cmd/crd_manager.go index f995af3ff5..33ca7bafb3 100644 --- a/cmd/spiderpool-agent/cmd/crd_manager.go +++ b/cmd/spiderpool-agent/cmd/crd_manager.go @@ -7,6 +7,8 @@ import ( "strconv" "github.com/go-logr/logr" + multusv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + resourcev1 "k8s.io/api/resource/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -16,7 +18,6 @@ import ( controllerruntimelog "sigs.k8s.io/controller-runtime/pkg/log" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/spidernet-io/spiderpool/pkg/constant" spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" ) @@ -27,10 +28,10 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(spiderpoolv2beta1.AddToScheme(scheme)) utilruntime.Must(kubevirtv1.AddToScheme(scheme)) - utilruntime.Must(netv1.AddToScheme(scheme)) + utilruntime.Must(multusv1.AddToScheme(scheme)) } -func newCRDManager() (ctrl.Manager, error) { +func 
newCRDManager(cfg Config) (ctrl.Manager, error) { // set logger for controller-runtime framework // The controller-runtime would print debug stack if we do not init the log previously: https://github.com/kubernetes-sigs/controller-runtime/pull/2357 ctrl.SetLogger(logr.New(controllerruntimelog.NullLogSink{})) @@ -79,5 +80,23 @@ func newCRDManager() (ctrl.Manager, error) { return nil, err } + if cfg.DRAConfig.Enabled { + if err := mgr.GetFieldIndexer().IndexField(agentContext.InnerCtx, &resourcev1.ResourceSlice{}, + resourcev1.ResourceSliceSelectorNodeName, func(raw client.Object) []string { + rs := raw.(*resourcev1.ResourceSlice) + return []string{*rs.Spec.NodeName} + }); err != nil { + return nil, err + } + + if err := mgr.GetFieldIndexer().IndexField(agentContext.InnerCtx, &resourcev1.ResourceSlice{}, + resourcev1.ResourceSliceSelectorDriver, func(raw client.Object) []string { + rs := raw.(*resourcev1.ResourceSlice) + return []string{rs.Spec.Driver} + }); err != nil { + return nil, err + } + + } return mgr, nil } diff --git a/cmd/spiderpool-agent/cmd/daemon.go b/cmd/spiderpool-agent/cmd/daemon.go index 5979df1d51..92e447c082 100644 --- a/cmd/spiderpool-agent/cmd/daemon.go +++ b/cmd/spiderpool-agent/cmd/daemon.go @@ -25,6 +25,7 @@ import ( "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" + "github.com/spidernet-io/spiderpool/pkg/dra" "github.com/spidernet-io/spiderpool/pkg/ipam" "github.com/spidernet-io/spiderpool/pkg/ippoolmanager" "github.com/spidernet-io/spiderpool/pkg/kubevirtmanager" @@ -142,7 +143,7 @@ func DaemonMain() { } logger.Info("Begin to initialize spiderpool-agent runtime manager") - mgr, err := newCRDManager() + mgr, err := newCRDManager(agentContext.Cfg) if err != nil { logger.Fatal(err.Error()) } @@ -236,7 +237,7 @@ func DaemonMain() { logger.Sugar().Fatalf("Failed to clean up socket %s: %v", agentContext.Cfg.IpamUnixSocketPath, err) } unixServer, err := newAgentOpenAPIUnixServer() - if nil != err { + if err != nil { 
logger.Fatal(err.Error()) } agentContext.UnixServer = unixServer @@ -252,11 +253,20 @@ func DaemonMain() { }() spiderpoolAgentAPI, err := openapi.NewAgentOpenAPIUnixClient(agentContext.Cfg.IpamUnixSocketPath) - if nil != err { + if err != nil { logger.Fatal(err.Error()) } agentContext.unixClient = spiderpoolAgentAPI + if agentContext.Cfg.DRAConfig.Enabled { + logger.Info("Starting DRA driver") + if agentContext.draDriver, err = dra.NewDriver(agentContext.InnerCtx, agentContext.CRDManager.GetClient(), agentContext.ClientSet, agentContext.Cfg.DRAConfig.EnableNRI); err != nil { + logger.Sugar().Fatalf("failed to start DRA driver: %s", err.Error()) + } + } else { + logger.Info("DRA is disabled") + } + logger.Info("Set spiderpool-agent startup probe ready") agentContext.IsStartupProbe.Store(true) @@ -291,6 +301,11 @@ func WatchSignal(sigCh chan os.Signal) { } // others... + // dra + if agentContext.draDriver != nil { + agentContext.draDriver.Stop() + } + } } diff --git a/cmd/spiderpool-controller/cmd/config.go b/cmd/spiderpool-controller/cmd/config.go index 893e2a582a..38c92510f8 100644 --- a/cmd/spiderpool-controller/cmd/config.go +++ b/cmd/spiderpool-controller/cmd/config.go @@ -98,6 +98,7 @@ var envInfo = []envConf{ {"SPIDERPOOL_MULTUS_CONFIG_ENABLED", "false", false, nil, &controllerContext.Cfg.EnableMultusConfig, nil}, {"SPIDERPOOL_MULTUS_CONFIG_INFORMER_RESYNC_PERIOD", "60", false, nil, nil, &controllerContext.Cfg.MultusConfigInformerResyncPeriod}, + {"SPIDERPOOL_SPIDERCNI_CONFIG_ENABLED", "false", false, nil, &controllerContext.Cfg.EnableSpiderCNIConfig, nil}, {"SPIDERPOOL_CILIUM_CONFIGMAP_NAMESPACE_NAME", "kube-system/cilium-config", false, &controllerContext.Cfg.CiliumConfigName, nil, nil}, {"SPIDERPOOL_CONTROLLER_DEPLOYMENT_NAME", "spiderpool-controller", true, &controllerContext.Cfg.ControllerDeploymentName, nil, nil}, @@ -161,6 +162,7 @@ type Config struct { EnableMultusConfig bool MultusConfigInformerResyncPeriod int + EnableSpiderCNIConfig bool // 
configmap spiderpooltypes.SpiderpoolConfigmapConfig diff --git a/cmd/spiderpool-controller/cmd/daemon.go b/cmd/spiderpool-controller/cmd/daemon.go index f6572fb465..30df544706 100644 --- a/cmd/spiderpool-controller/cmd/daemon.go +++ b/cmd/spiderpool-controller/cmd/daemon.go @@ -265,7 +265,8 @@ func initControllerServiceManagers(ctx context.Context) { } controllerContext.PodManager = podManager - if controllerContext.Cfg.PodResourceInjectConfig.Enabled { + if controllerContext.Cfg.PodResourceInjectConfig.Enabled || + (controllerContext.Cfg.PodResourceInjectConfig.EnabledDRAWebhook && controllerContext.Cfg.DRAConfig.Enabled) { logger.Info("Begin to init Pod MutatingWebhook") if err := podmanager.InitPodWebhook(controllerContext.CRDManager); err != nil { logger.Fatal(err.Error()) @@ -392,6 +393,13 @@ func initControllerServiceManagers(ctx context.Context) { logger.Fatal(err.Error()) } } + + if controllerContext.Cfg.EnableSpiderCNIConfig { + logger.Info("Begin to set up SpiderCNIConfig controller") + if err := multuscniconfig.SetupSpiderCNIConfigController(controllerContext.CRDManager, controllerContext.Leader); err != nil { + logger.Fatal(err.Error()) + } + } } func initGCManager(ctx context.Context) { diff --git a/go.mod b/go.mod index 27893cdca2..0f47ac3f68 100644 --- a/go.mod +++ b/go.mod @@ -1,18 +1,18 @@ module github.com/spidernet-io/spiderpool -go 1.25 +go 1.25.0 require ( github.com/agiledragon/gomonkey/v2 v2.11.0 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 - github.com/cilium/cilium v1.14.1 - github.com/containernetworking/cni v1.1.2 - github.com/containernetworking/plugins v1.5.1 + github.com/cilium/cilium v1.15.12 + github.com/containernetworking/cni v1.3.0 + github.com/containernetworking/plugins v1.7.1 github.com/go-openapi/errors v0.22.0 github.com/go-openapi/loads v0.21.2 github.com/go-openapi/runtime v0.26.2 github.com/go-openapi/spec v0.21.0 - github.com/go-openapi/strfmt v0.21.8 + github.com/go-openapi/strfmt v0.21.9 
github.com/go-openapi/swag v0.23.0 github.com/go-openapi/validate v0.22.3 github.com/go-swagger/go-swagger v0.30.4 @@ -20,86 +20,91 @@ require ( github.com/golang/mock v1.6.0 github.com/google/gops v0.3.27 github.com/grafana/pyroscope-go v1.2.0 - github.com/jessevdk/go-flags v1.5.0 + github.com/jessevdk/go-flags v1.6.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 github.com/kdoctor-io/kdoctor v0.2.0 github.com/mdlayher/ndp v1.0.1 - github.com/onsi/ginkgo/v2 v2.22.1 - github.com/onsi/gomega v1.36.2 + github.com/onsi/ginkgo/v2 v2.27.2 + github.com/onsi/gomega v1.38.2 github.com/openkruise/kruise-api v1.3.0 - github.com/prometheus/client_golang v1.21.1 - github.com/sasha-s/go-deadlock v0.3.1 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 - github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73 + github.com/prometheus/client_golang v1.23.2 + github.com/sasha-s/go-deadlock v0.3.5 + github.com/spf13/cobra v1.10.0 + github.com/spf13/pflag v1.0.9 + github.com/spidernet-io/e2eframework v0.0.0-20251225035555-8ca1b9360edb github.com/tigera/operator v1.33.0 - github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 - go.opentelemetry.io/otel v1.25.0 + github.com/vishvananda/netlink v1.3.1 + go.opentelemetry.io/otel v1.36.0 go.opentelemetry.io/otel/exporters/prometheus v0.44.0 - go.opentelemetry.io/otel/metric v1.25.0 - go.opentelemetry.io/otel/sdk v1.24.0 - go.opentelemetry.io/otel/sdk/metric v1.24.0 - go.opentelemetry.io/otel/trace v1.25.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 + go.opentelemetry.io/otel/sdk v1.36.0 + go.opentelemetry.io/otel/sdk/metric v1.36.0 + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/atomic v1.10.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.25.0 - golang.org/x/net v0.42.0 - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 - golang.org/x/tools v0.35.0 + go.uber.org/zap v1.27.0 + golang.org/x/net v0.47.0 + 
golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 + golang.org/x/tools v0.38.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.29.4 - k8s.io/apiextensions-apiserver v0.29.4 - k8s.io/apimachinery v0.30.0-beta.0 - k8s.io/client-go v0.29.4 - k8s.io/code-generator v0.30.0-beta.0 - k8s.io/kubernetes v1.29.0 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b + k8s.io/api v0.35.0 + k8s.io/apiextensions-apiserver v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.35.0 + k8s.io/code-generator v0.35.0 + k8s.io/kubernetes v1.35.0 + k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 kubevirt.io/api v1.2.0 - sigs.k8s.io/controller-runtime v0.16.1 - sigs.k8s.io/controller-tools v0.11.4 - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/controller-tools v0.17.2 + sigs.k8s.io/yaml v1.6.0 ) require ( - github.com/go-logr/logr v1.4.2 - k8s.io/klog/v2 v2.120.1 // indirect + github.com/go-logr/logr v1.4.3 + k8s.io/klog/v2 v2.130.1 // indirect ) -require github.com/google/go-cmp v0.6.0 // indirect - -require k8s.io/component-base v0.29.4 // indirect +require github.com/google/go-cmp v0.7.0 // indirect require ( + github.com/Mellanox/rdmamap v1.1.0 + github.com/containerd/nri v0.9.0 github.com/hashicorp/go-multierror v1.1.1 github.com/mdlayher/arp v0.0.0-20220221190821-c37aaafac7f9 - github.com/safchain/ethtool v0.6.1 - go.uber.org/automaxprocs v1.5.3 - k8s.io/kubectl v0.26.3 + github.com/safchain/ethtool v0.5.10 + go.uber.org/automaxprocs v1.6.0 + google.golang.org/grpc v1.72.2 + k8s.io/dynamic-resource-allocation v0.35.0 + k8s.io/kubectl v0.35.0 + k8s.io/kubelet v0.35.0 ) require ( + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect 
github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect - github.com/coreos/go-iptables v0.7.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cilium/ebpf v0.16.0 // indirect + github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/ttrpc v1.2.7 // indirect + github.com/coreos/go-iptables v0.8.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect @@ -107,89 +112,105 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gobuffalo/flect v0.3.0 // indirect + github.com/gobuffalo/flect v1.0.3 // indirect 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/knqyf263/go-plugin v0.8.1-0.20240827022226-114c6257e441 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect github.com/mdlayher/packet v1.1.2 // indirect - github.com/mdlayher/socket 
v0.4.1 // indirect + github.com/mdlayher/socket v0.5.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + 
github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shirou/gopsutil/v3 v3.23.5 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.16.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/viper v1.18.1 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tetratelabs/wazero v1.8.2-0.20241030035603-dc08732e57d5 // indirect github.com/tigera/api v0.0.0-20230406222214-ca74195900cb // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect - github.com/vishvananda/netns v0.0.4 // indirect + github.com/vishvananda/netns v0.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.5 // indirect go.mongodb.org/mongo-driver v1.13.1 // indirect - go.uber.org/dig v1.17.0 // indirect - golang.org/x/crypto v0.40.0 // indirect - golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/mod v0.26.0 // indirect - golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/term v0.33.0 // indirect - golang.org/x/text v0.27.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect + golang.org/x/crypto v0.45.0 // 
indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/protobuf v1.36.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/cri-api v0.35.0 // indirect + k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/knftables v0.0.18 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/go.sum b/go.sum index e7efdd05e9..7395c2f969 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,10 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -16,7 +17,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -37,10 +37,9 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 
h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -52,10 +51,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Mellanox/rdmamap v1.1.0 h1:A/W1wAXw+6vm58f3VklrIylgV+eDJlPVIMaIKuxgUT4= +github.com/Mellanox/rdmamap v1.1.0/go.mod h1:fN+/V9lf10ABnDCwTaXRjeeWijLt2iVLETnK+sx/LY8= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -65,8 +67,6 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -81,30 +81,37 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= -github.com/cilium/cilium v1.14.1 h1:8yj+DVgv7bvBkqiKL3F/nPB6ddNTnnnbye6gznAsXH4= -github.com/cilium/cilium v1.14.1/go.mod h1:ghd9LkTSbRPtJal0Bsdq1ise+j5Ezy14xgaM2o3XLCI= -github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c h1:/NqY4jLr92f7VcUJe1gHS6CgSGWFUCeD2f4QhxO8tgE= -github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c/go.mod h1:iOlDXIgPGBabS7J0Npbq8MC5+gfvUGSBISnxXIJjfgs= +github.com/cilium/cilium v1.15.12 h1:4xJAi93ddye1Tf+ozNH0vre5h5Atz5/eWbjR2WsSet4= +github.com/cilium/cilium 
v1.15.12/go.mod h1:9CSrURhGcZVmpTnRvmh+dsCYobcoFgBmNUTU12vOBBM= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 h1:R/QlThqx099hS6req1k2Q87fvLSRgCEicQGate9vxO4= +github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018/go.mod h1:p044XccCmONGIUbx3bJ7qvHXK0RcrdvIvbTGiu/RjUA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= -github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= -github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= -github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= -github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go 
v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/nri v0.9.0 h1:jribDJs/oQ95vLO4Yn19HKFYriZGWKiG6nKWjl9Y/x4= +github.com/containerd/nri v0.9.0/go.mod h1:sDRoMy5U4YolsWthg7TjTffAwPb6LEr//83O+D3xVU4= +github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= +github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= +github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs= +github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0= +github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc= +github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -113,36 +120,44 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.11.2-0.20200112161605-a7c079c43d51+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate 
v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps 
v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -150,12 +165,12 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= @@ -184,8 +199,8 @@ github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/spec 
v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.8 h1:VYBUoKYRLAlgKDrIxR/I0lKrztDQ0tuTDrbhLVP8Erg= -github.com/go-openapi/strfmt v0.21.8/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -195,6 +210,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-swagger/go-swagger v0.30.4 h1:cPrWLSXY6ZdcgfRicOj0lANg72TkTHz6uv/OlUdzO5U= github.com/go-swagger/go-swagger v0.30.4/go.mod h1:YM5D5kR9c1ft3ynMXvDk2uo/7UZHKFEqKXcAL9f4Phc= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= @@ -202,8 +219,10 @@ github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.m github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk= -github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -245,8 +264,10 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -261,12 +282,14 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI= github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -281,12 +304,11 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -296,7 +318,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers 
v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -314,6 +335,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -324,18 +347,21 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags 
v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= +github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -349,9 +375,10 @@ github.com/kdoctor-io/kdoctor v0.2.0/go.mod h1:TxkjBwM4sdnOTHABxgL1gO68tlzHUnbiu github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/klauspost/compress v1.18.0 
h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/knqyf263/go-plugin v0.8.1-0.20240827022226-114c6257e441 h1:Q/sZeuWkXprbKJSs7AwXryuZKSEL/a8ltC7e7xSspN0= +github.com/knqyf263/go-plugin v0.8.1-0.20240827022226-114c6257e441/go.mod h1:CvCrNDMiKFlAlLFLmcoEfsTROEfNKbEZAMMrwQnLXCM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -372,26 +399,32 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= 
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mdlayher/arp v0.0.0-20220221190821-c37aaafac7f9 h1:LxldC/UdEeJ+j3i/g5K2iPePYWXOcy6AAhCYs3VREKc= github.com/mdlayher/arp v0.0.0-20220221190821-c37aaafac7f9/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og= github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E= -github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= -github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod 
h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -408,8 +441,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -432,16 +466,17 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 
v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= -github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openkruise/kruise-api v1.3.0 h1:yfEy64uXgSuX/5RwePLbwUK/uX8RRM8fHJkccel5ZIQ= github.com/openkruise/kruise-api v1.3.0/go.mod h1:9ZX+ycdHKNzcA5ezAf35xOa2Mwfa2BYagWr0lKgi5dU= github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 h1:t/CahSnpqY46sQR01SoS+Jt0jtjgmhgE6lFmRnO4q70= @@ -450,44 +485,46 @@ github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPf github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= 
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21 h1:PfiCACRd+dzB+gLQAY3ZekMo/56XZ1haOzEguVZ1ZYE= -github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b h1:dW+UhJMzusDO6hqVGuCYeDxXWAzc7HnA9CsPN+uHPnA= github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b/go.mod h1:Avoy1rTN1GfeisnHGf3WhQNqR+BuGOcwfNFsdWX6OHE= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model 
v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/safchain/ethtool v0.6.1 h1:mhRnXE1H8fV8TTXh/HdqE4tXtb57r//BQh5pPYMuM5k= -github.com/safchain/ethtool v0.6.1/go.mod h1:JzoNbG8xeg/BeVeVoMCtCb3UPWoppZZbFpA+1WFh+M0= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/safchain/ethtool v0.5.10 h1:Im294gZtuf4pSGJRAOGKaASNi3wMeFaGaWuSaomedpc= +github.com/safchain/ethtool v0.5.10/go.mod h1:w9jh2Lx7YBR4UwzLkzCmWl85UY0W2uZdd7/DckVE5+c= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= 
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y= github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= @@ -498,22 +535,24 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0= +github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= -github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73 h1:KzfBFPaiBnT6LBVhwrabJ59o/0Vsv/9CKszUgaz1TIs= -github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73/go.mod h1:k0KYxyNjZYyEG1bsGzSbMx5Q+Z1H6oOjEq5qz9UlBzY= +github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.1 h1:rmuU42rScKWlhhJDyXZRKJQHXFX02chSVW1IvkPGiVM= +github.com/spf13/viper v1.18.1/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spidernet-io/e2eframework v0.0.0-20251225035555-8ca1b9360edb h1:2Zn+kGP8J51LoT303usfWhJQCjUBRlmidmc1vKm9jps= +github.com/spidernet-io/e2eframework v0.0.0-20251225035555-8ca1b9360edb/go.mod h1:EhKaJ3gb9eSW9h7UrWg+4LQg9jdPbCe3ueuJ3weGNXU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -528,12 +567,23 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tetratelabs/wazero v1.8.2-0.20241030035603-dc08732e57d5 h1:F+AT6Jxxww3j4/B/wXU01Raq4J8fg/Cg2HD4XsETGaU= +github.com/tetratelabs/wazero v1.8.2-0.20241030035603-dc08732e57d5/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty 
v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb h1:Y7r5Al3V235KaEoAzGBz9RYXEbwDu8CPaZoCq2PlD8w= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb/go.mod h1:ZZghiX3CUsBAc0osBjRvV6y/eun2ObYdvSbjqXAoj/w= github.com/tigera/operator v1.33.0 h1:ml2d8+eADJHMxenBcMlMpC4ZRZ0bgvXGx9i6fQsKje0= @@ -544,11 +594,14 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 h1:mi+RH1U/MmAQvz2Ys7r1/8OWlGJoBvF8iCXRKk2uym4= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739/go.mod h1:0BeLktV/jHb2/Hmw1yLD7+yaIB8PDy11RCty0tCPWZg= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= +github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.5 
h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= @@ -565,6 +618,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= +go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= @@ -575,44 +630,50 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= -go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= -go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= -go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= -go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= -go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= -go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx 
v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -623,8 +684,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -651,8 +712,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= -golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -686,7 +747,6 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -700,8 +760,8 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -714,8 +774,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -728,8 +788,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -738,6 +798,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -748,11 +809,9 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -772,17 +831,14 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -792,17 +848,22 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.1-0.20230616193735-e0c3b6e6ae3b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -815,14 +876,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -873,17 +934,16 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= -golang.org/x/tools/go/expect 
v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= -golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -955,18 +1015,16 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -985,6 +1043,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -998,8 +1058,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1007,6 +1067,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1027,7 +1089,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1039,45 +1100,49 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= -k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= -k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod 
h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.30.0-beta.0 h1:/gaNLWP5ynEG0ExJ+4w2YCj5/L4MU66RsWEAKciy0/g= -k8s.io/apimachinery v0.30.0-beta.0/go.mod h1:wEJvNDlfxMRaMhyv38SIHIEC9hah/xuzqUUhxIyUv7Y= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= k8s.io/client-go v0.23.2/go.mod h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.30.0-beta.0 h1:p+51J7CG4i6Cu/cyRrpXU7zT/XaHIHv7NK/mujr0gdY= -k8s.io/code-generator v0.30.0-beta.0/go.mod h1:kvx3eylE/Y/Z2dj8ncw3CR/zjQ37ou9lc3A0Pt8xX54= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/code-generator v0.35.0 h1:TvrtfKYZTm9oDF2z+veFKSCcgZE3Igv0svY+ehCmjHQ= +k8s.io/code-generator v0.35.0/go.mod h1:iS1gvVf3c/T71N5DOGYO+Gt3PdJ6B9LYSvIyQ4FHzgc= +k8s.io/cri-api v0.35.0 h1:fxLSKyJHqbyCSUsg1rW4DRpmjSEM/elZ1GXzYTSLoDQ= +k8s.io/cri-api v0.35.0/go.mod h1:Cnt29u/tYl1Se1cBRL30uSZ/oJ5TaIp4sZm1xDLvcMc= +k8s.io/dynamic-resource-allocation v0.35.0 h1:St6dsCCylLg3HiFPcyHzFF8YQO6yziUDaVRLGdkrNH8= +k8s.io/dynamic-resource-allocation 
v0.35.0/go.mod h1:uaFga3VJtwyfpfZwpuJG7mlurWGQaaiGUa+QZmooz2U= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ= +k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b/go.mod h1:CgujABENc3KuTrcsdpGmrrASjtQsWCT7R99mEV4U/fM= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM= -k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g= -k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= -k8s.io/kubernetes v1.29.0/go.mod 
h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= +k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= +k8s.io/kubelet v0.35.0 h1:8cgJHCBCKLYuuQ7/Pxb/qWbJfX1LXIw7790ce9xHq7c= +k8s.io/kubelet v0.35.0/go.mod h1:ciRzAXn7C4z5iB7FhG1L2CGPPXLTVCABDlbXt/Zz8YA= +k8s.io/kubernetes v1.35.0 h1:PUOojD8c8E3csMP5NX+nLLne6SGqZjrYCscptyBfWMY= +k8s.io/kubernetes v1.35.0/go.mod h1:Tzk9Y9W/XUFFFgTUVg+BAowoFe+Pc7koGLuaiLHdcFg= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= kubevirt.io/api v1.2.0 h1:1f8XQLPl4BuHPsc6SHTPnYSYeDxucKCQGa8CdrGJSRc= kubevirt.io/api v1.2.0/go.mod h1:SbeR9ma4EwnaOZEUkh/lNz0kzYm5LPpEDE30vKXC5Zg= kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 h1:IWo12+ei3jltSN5jQN1xjgakfvRSF3G3Rr4GXVOOy2I= @@ -1087,17 +1152,22 @@ kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.16.1 h1:+15lzrmHsE0s2kNl0Dl8cTchI5Cs8qofo5PGcPrV9z0= -sigs.k8s.io/controller-runtime v0.16.1/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= -sigs.k8s.io/controller-tools v0.11.4 h1:jqXJ/Xb6yBgbgcBbw1YoC3rC+Bt1XZWiLjj0ZHv/GrU= -sigs.k8s.io/controller-tools v0.11.4/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-tools v0.17.2 h1:jNFOKps8WnaRKZU2R+4vRCHnXyJanVmXBWqkuUPFyFg= +sigs.k8s.io/controller-tools v0.17.2/go.mod h1:4q5tZG2JniS5M5bkiXY2/potOiXyhoZVw/U48vLkXk0= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/knftables v0.0.18 h1:6Duvmu0s/HwGifKrtl6G3AyAPYlWiZqTgS8bkVMiyaE= +sigs.k8s.io/knftables v0.0.18/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod 
h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/pkg/constant/k8s.go b/pkg/constant/k8s.go index 3d7258bc69..728b5f89da 100644 --- a/pkg/constant/k8s.go +++ b/pkg/constant/k8s.go @@ -101,8 +101,9 @@ const ( AnnoDefaultRouteInterface = AnnotationPre + "/default-route-nic" //dra - DraAnnotationPre = "dra.spidernet.io" - AnnoDraCdiVersion = AnnotationPre + "/cdi-version" + DraAnnotationPre = "dra.spidernet.io" + AnnoDraCdiVersion = AnnotationPre + "/cdi-version" + AnnoDRAPodNetworkStatus = AnnotationPre + "/network-status" // webhook PodMutatingWebhookName = "pods.spiderpool.spidernet.io" @@ -129,6 +130,7 @@ const ( KindSpiderReservedIP = "SpiderReservedIP" KindSpiderCoordinator = "SpiderCoordinator" KindSpiderMultusConfig = "SpiderMultusConfig" + KindSpiderCNIConfig = "SpiderCNIConfig" KindSpiderClaimParameter = "SpiderClaimParameter" ) @@ -193,12 +195,21 @@ const ( DRACDIVendor = "k8s." 
+ DRADriverName DRACDIClass = "claim" DRACDIKind = DRACDIVendor + "/" + DRACDIClass - DRADriverName = "netresources.spidernet.io" + DRADriverName = "dra.spidernet.io" + DRACNIDeviceClass = "dra-static-nic.spidernet.io" + DRANRIDeviceClass = "dra-dynamic-nic.spidernet.io" DRAPluginRegistrationPath = "/var/lib/kubelet/plugins_registry/" + DRADriverName + ".sock" DRADriverPluginPath = "/var/lib/kubelet/plugins/" + DRADriverName DRADriverPluginSocketPath = DRADriverPluginPath + "/plugin.sock" ) +// env + +const ( + ENV_SPIDERPOOL_NODENAME = "SPIDERPOOL_NODE_NAME" + ENV_SPIDERPOOL_AGENT_NAMESPACE = "SPIDERPOOL_AGENT_NAMESPACE" +) + // spiderpool cleaning sriov const ( SriovNetworkOperatorAPIGroup = "sriovnetwork.openshift.io" diff --git a/pkg/coordinatormanager/calico_ippool_informer.go b/pkg/coordinatormanager/calico_ippool_informer.go index 1881ab1dd7..aebfd97245 100644 --- a/pkg/coordinatormanager/calico_ippool_informer.go +++ b/pkg/coordinatormanager/calico_ippool_informer.go @@ -9,6 +9,7 @@ import ( calicov1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -18,35 +19,48 @@ import ( "github.com/spidernet-io/spiderpool/pkg/constant" ) -func NewCalicoIPPoolController(mgr ctrl.Manager, workQueue workqueue.RateLimitingInterface) (controller.Controller, error) { +var calicoController controller.Controller + +func NewCalicoIPPoolController(mgr ctrl.Manager, workqueue workqueue.TypedRateLimitingInterface[string]) (controller.Controller, error) { if mgr == nil { return nil, fmt.Errorf("controller-runtime manager %w", constant.ErrMissingRequiredParam) } r := &calicoIPPoolReconciler{ client: mgr.GetClient(), - spiderCoordinatorWorkqueue: workQueue, + spiderCoordinatorWorkqueue: workqueue, } - c, err := controller.NewUnmanaged(constant.KindSpiderCoordinator, mgr, 
controller.Options{Reconciler: r}) - if err != nil { - return nil, err + var err error + if calicoController == nil { + // only new one controller, avoid duplicate controller + // // controller with name %s already exists. Controller names must be unique to avoid multiple controllers reporting to the same metric + calicoController, err = controller.New(constant.KindSpiderCoordinator, mgr, controller.Options{Reconciler: r, SkipNameValidation: ptr.To(true)}) + if err != nil { + return nil, err + } } - if err := c.Watch(source.Kind(mgr.GetCache(), &calicov1.IPPool{}), &handler.EnqueueRequestForObject{}); err != nil { + if err := calicoController.Watch( + source.Kind[*calicov1.IPPool]( + mgr.GetCache(), + &calicov1.IPPool{}, + &handler.TypedEnqueueRequestForObject[*calicov1.IPPool]{}, + ), + ); err != nil { return nil, err } - return c, nil + return calicoController, nil } type calicoIPPoolReconciler struct { client client.Client - spiderCoordinatorWorkqueue workqueue.RateLimitingInterface + spiderCoordinatorWorkqueue workqueue.TypedRateLimitingInterface[string] } func (r *calicoIPPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { InformerLogger.Sugar().Debugf("Watched Calico IPPool %v Enqueued", req.Name) - r.spiderCoordinatorWorkqueue.Add(fmt.Sprintf("CalicoIPPool/%v", req.Name)) + r.spiderCoordinatorWorkqueue.Add(req.Name) return ctrl.Result{}, nil } diff --git a/pkg/coordinatormanager/coordinator_informer.go b/pkg/coordinatormanager/coordinator_informer.go index f90a38aa50..1511efb50a 100644 --- a/pkg/coordinatormanager/coordinator_informer.go +++ b/pkg/coordinatormanager/coordinator_informer.go @@ -20,7 +20,7 @@ import ( calicov1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -28,10 +28,10 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" - networkingInformer "k8s.io/client-go/informers/networking/v1alpha1" + networkingInformer "k8s.io/client-go/informers/networking/v1" "k8s.io/client-go/kubernetes" corelister "k8s.io/client-go/listers/core/v1" - networkingLister "k8s.io/client-go/listers/networking/v1alpha1" + networkingLister "k8s.io/client-go/listers/networking/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" @@ -98,7 +98,7 @@ type CoordinatorController struct { // only not to nil if the cilium multu-pool is enabled CiliumIPPoolsSynced cache.InformerSynced - Workqueue workqueue.RateLimitingInterface + Workqueue workqueue.TypedRateLimitingInterface[string] LeaderRetryElectGap time.Duration ResyncPeriod time.Duration @@ -125,6 +125,7 @@ func (cc *CoordinatorController) SetupInformer( } InformerLogger = logutils.Logger.Named("Coordinator-Informer") + cc.Workqueue = workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()) go func() { for { @@ -157,8 +158,6 @@ func (cc *CoordinatorController) SetupInformer( } }() - cc.Workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), constant.KindSpiderCoordinator) - if err := cc.StartWatchPodCIDR(innerCtx, InformerLogger); err != nil { InformerLogger.Error(err.Error()) continue @@ -170,7 +169,7 @@ func (cc *CoordinatorController) SetupInformer( err := cc.addEventHandlers( spiderInformerFactory.Spiderpool().V2beta1().SpiderCoordinators(), k8sInformerFactory.Core().V1().ConfigMaps(), - k8sInformerFactory.Networking().V1alpha1().ServiceCIDRs(), + k8sInformerFactory.Networking().V1().ServiceCIDRs(), ) if err != nil { InformerLogger.Error(err.Error()) @@ -238,7 +237,7 @@ func (cc *CoordinatorController) addEventHandlers( } 
InformerLogger.Debug("Checking if the ServiceCIDR is available in your cluster") - var serviceCIDR networkingv1alpha1.ServiceCIDRList + var serviceCIDR networkingv1.ServiceCIDRList err = cc.APIReader.List(context.TODO(), &serviceCIDR) if err != nil { InformerLogger.Warn("ServiceCIDR feature is unavailable in your cluster, Don't start the serviceCIDR informer") @@ -256,23 +255,23 @@ func (cc *CoordinatorController) addEventHandlers( func (cc *CoordinatorController) addServiceCIDRHandler(serviceCIDRInformer cache.SharedIndexInformer) error { _, err := serviceCIDRInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - serviceCidr := obj.(*networkingv1alpha1.ServiceCIDR) + serviceCidr := obj.(*networkingv1.ServiceCIDR) logger := InformerLogger.With( zap.String("ServiceCIDRName", serviceCidr.Name), zap.String("Operation", "Add"), ) - cc.Workqueue.Add(fmt.Sprintf("ServiceCIDR/%v", serviceCidr.Name)) + cc.Workqueue.Add(serviceCidr.Name) logger.Debug(messageEnqueueCoordiantor) }, DeleteFunc: func(obj interface{}) { - serviceCidr := obj.(*networkingv1alpha1.ServiceCIDR) + serviceCidr := obj.(*networkingv1.ServiceCIDR) logger := InformerLogger.With( zap.String("ServiceCIDRName", serviceCidr.Name), zap.String("Operation", "Del"), ) - cc.Workqueue.Add(fmt.Sprintf("ServiceCIDR/%v", serviceCidr.Name)) + cc.Workqueue.Add(serviceCidr.Name) logger.Debug(messageEnqueueCoordiantor) }, }) @@ -285,18 +284,18 @@ func (cc *CoordinatorController) addServiceCIDRHandler(serviceCIDRInformer cache return nil } -func (cc *CoordinatorController) enqueueCoordinatorOnAdd(obj interface{}) { +func (cc *CoordinatorController) enqueueCoordinatorOnAdd(obj any) { coord := obj.(*spiderpoolv2beta1.SpiderCoordinator) logger := InformerLogger.With( zap.String("CoordinatorName", coord.Name), zap.String("Operation", "ADD"), ) - cc.Workqueue.Add(fmt.Sprintf("SpiderCoordinator/%v", coord.Name)) + cc.Workqueue.Add(coord.Name) logger.Debug(messageEnqueueCoordiantor) } -func 
(cc *CoordinatorController) enqueueCoordinatorOnUpdate(oldObj, newObj interface{}) { +func (cc *CoordinatorController) enqueueCoordinatorOnUpdate(oldObj, newObj any) { oldCoord := oldObj.(*spiderpoolv2beta1.SpiderCoordinator) newCoord := newObj.(*spiderpoolv2beta1.SpiderCoordinator) logger := InformerLogger.With( @@ -312,7 +311,7 @@ func (cc *CoordinatorController) enqueueCoordinatorOnUpdate(oldObj, newObj inter "Pod CIDR type changed from %s to %s", *oldCoord.Spec.PodCIDRType, *newCoord.Spec.PodCIDRType, ) logger.Sugar().Infof("PodCIDRtype changed from %s to %s", *oldCoord.Spec.PodCIDRType, *newCoord.Spec.PodCIDRType) - cc.Workqueue.Add(fmt.Sprintf("SpiderCoordinator/%v", newCoord.Name)) + cc.Workqueue.Add(newCoord.Name) logger.Debug(messageEnqueueCoordiantor) return } @@ -322,11 +321,11 @@ func (cc *CoordinatorController) enqueueCoordinatorOnUpdate(oldObj, newObj inter return } - cc.Workqueue.Add(fmt.Sprintf("SpiderCoordinator/%v", newCoord.Name)) + cc.Workqueue.Add(newCoord.Name) logger.Debug(messageEnqueueCoordiantor) } -func (cc *CoordinatorController) enqueueCoordinatorOnConfigMapAdd(obj interface{}) { +func (cc *CoordinatorController) enqueueCoordinatorOnConfigMapAdd(obj any) { cm := obj.(*corev1.ConfigMap) if cm.Name == ciliumConfig || cm.Name == kubeadmConfigMap { logger := InformerLogger.With( @@ -334,7 +333,7 @@ func (cc *CoordinatorController) enqueueCoordinatorOnConfigMapAdd(obj interface{ zap.String("Operation", "Add"), ) - cc.Workqueue.Add(fmt.Sprintf("ConfigMap/%v", cm.Name)) + cc.Workqueue.Add(cm.Name) logger.Debug(messageEnqueueCoordiantor) } } @@ -352,7 +351,7 @@ func (cc *CoordinatorController) enqueueCoordinatorOnConfigMapUpdated(oldObj, ne zap.String("Operation", "UPDATE"), ) - cc.Workqueue.Add(fmt.Sprintf("ConfigMap/%v", newCm.Name)) + cc.Workqueue.Add(newCm.Name) logger.Debug(messageEnqueueCoordiantor) } } @@ -365,7 +364,7 @@ func (cc *CoordinatorController) enqueueCoordinatorOnConfigMapDeleted(obj interf zap.String("Operation", "DEL"), ) - 
cc.Workqueue.Add(fmt.Sprintf("ConfigMap/%v", cm.Name)) + cc.Workqueue.Add(cm.Name) logger.Debug(messageEnqueueCoordiantor) } } @@ -412,7 +411,7 @@ func (cc *CoordinatorController) processNextWorkItem(ctx context.Context) bool { defer cc.Workqueue.Done(obj) logger := logutils.FromContext(ctx).With( - zap.String("Event Key", obj.(string)), + zap.String("Event Key", obj), zap.String("Operation", "PROCESS"), ) @@ -590,6 +589,7 @@ func (cc *CoordinatorController) WatchCalicoIPPools(ctx context.Context, logger logger.Info("Starting Calico IPPool controller") if err := calicoController.Start(ctx); err != nil { logger.Sugar().Errorf("Failed to start Calico IPPool controller: %v", err) + return } logger.Info("Shutdown Calico IPPool controller") }() @@ -667,7 +667,7 @@ func (cc *CoordinatorController) updateCiliumPodCIDR(k8sPodCIDR []string, coordi ipam := ccm.Data["ipam"] switch ipam { - case option.IPAMClusterPool, option.IPAMClusterPoolV2: + case option.IPAMClusterPool: var podCIDR []string v4, ok := ccm.Data["cluster-pool-ipv4-cidr"] if ok { diff --git a/pkg/dra/config.go b/pkg/dra/config.go new file mode 100644 index 0000000000..394fa9197d --- /dev/null +++ b/pkg/dra/config.go @@ -0,0 +1,110 @@ +package dra + +import ( + "encoding/json" + "fmt" + + resourcev1 "k8s.io/api/resource/v1" +) + +type NetworkConfig struct { + // MultusNamespace is the namespace where the MultusConfig CRs are located + MultusNamespace string `json:"multusNamespace"` + // DefaultNic is the default MultusConfig to be used, usually the primary network interface + // in k8s, the default network interface is usually named "eth0" + DefaultNic *MultusConfig `json:"defaultNic"` + // SecondaryNics is the secondary MultusConfig to be used + // usually the secondary network interface is usually named "net1" + SecondaryNics *SecondaryNic `json:"secondaryNics"` +} + +type SecondaryNic struct { + // StaticNics is the static MultusConfig to be used + StaticNics []*MultusConfig `json:"staticNics"` + // 
DynamicNics is the dynamic MultusConfig to be used via
+	// gpu affinity
+	DynamicNics *DynamicNic `json:"dynamicNics"`
+}
+
+type DynamicNic struct {
+	// GPUAffinityPolicy can be "best" or "all"
+	GPUAffinityPolicy string `json:"gpuAffinityPolicy"`
+	// PotentialMultusConfigs is a list of MultusConfig names that the dynamic NIC can be allocated to
+	// empty means all MultusConfig can be used.
+	PotentialMultusConfigs []string `json:"potentialMultusConfigs"`
+}
+
+type MultusConfig struct {
+	// MultusName is the name of the MultusConfig
+	MultusName string `json:"multusName"`
+	// DefaultRoute is whether the MultusConfig is the default route
+	DefaultRoute bool `json:"defaultRoute"`
+}
+
+// ParseNetworkConfig gets the network config from resource claim opaqueConfig
+func ParseNetworkConfig(configs []resourcev1.DeviceClaimConfiguration) (*NetworkConfig, error) {
+	// parse the resourceclaim network config
+	var multusConfig *NetworkConfig
+	for _, config := range configs {
+		if config.DeviceConfiguration.Opaque == nil {
+			continue
+		}
+		if config.DeviceConfiguration.Opaque.Driver != "OUR_DRADRIVER_NAME" {
+			continue
+		}
+
+		if err := json.Unmarshal(config.DeviceConfiguration.Opaque.Parameters.Raw, &multusConfig); err != nil {
+			return nil, err
+		}
+		break
+	}
+
+	if multusConfig == nil {
+		return nil, fmt.Errorf("failed to get network config from resource claim")
+	}
+
+	return multusConfig, nil
+}
+
+// func ParseToAnnotations(annotations map[string]string) {
+// 	if annotations == nil {
+// 		annotations = make(map[string]string)
+// 	}
+
+// 	if d.DefaultNic != nil {
+// 		annotations[constant.MultusDefaultNetAnnot] = MultusAnnotationValue(d.MultusNamespace, d.DefaultNic.MultusName)
+// 	}
+
+// 	if d.SecondaryNics != nil {
+// 		for idx, nic := range d.SecondaryNics.StaticNics {
+// 			if nic == nil {
+// 				continue
+// 			}
+// 			// by default, the default route is located at the first nic of the pod(eth0). 
+// // we can configure the default route to the second nics of the pod via annotations +// // e.g. +// // annotations: +// // ipam.spidernet.io/default-route-nic: net1 +// // In multus, the multi-nic is formatted as "net1", "net2", etc. +// // Note: we expect only one nic to be the default route, if we configure DefaultRoute to +// // true for multi-nic, the first nic only be selected. +// if nic.DefaultRoute && annotations[constant.AnnoDefaultRouteInterface] == "" { +// annotations[constant.AnnoDefaultRouteInterface] = fmt.Sprintf("net%d", idx+1) +// } + +// if idx == 0 { +// annotations[constant.MultusNetworkAttachmentAnnot] = MultusAnnotationValue(d.MultusNamespace, nic.MultusName) +// continue +// } +// annotations[constant.MultusNetworkAttachmentAnnot] = annotations[constant.MultusNetworkAttachmentAnnot] + "," + MultusAnnotationValue(d.MultusNamespace, nic.MultusName) +// } +// } +// } + +func (d *NetworkConfig) GetResourceNames() []string { + return nil +} + +func MultusAnnotationValue(namespace, name string) string { + return fmt.Sprintf("%s/%s", namespace, name) +} diff --git a/pkg/dra/device_state.go b/pkg/dra/device_state.go new file mode 100644 index 0000000000..b682e5df4f --- /dev/null +++ b/pkg/dra/device_state.go @@ -0,0 +1,252 @@ +package dra + +import ( + "context" + "fmt" + "net" + "regexp" + "strings" + + "github.com/Mellanox/rdmamap" + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/pkg/networking/networking" + "github.com/spidernet-io/spiderpool/pkg/utils" + "github.com/vishvananda/netlink" + "go.uber.org/zap" + resourceapi "k8s.io/api/resource/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/utils/ptr" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +type DeviceState struct { + namespace string + logger *zap.Logger + client client.Client +} + +func (d *DeviceState) Init(logger *zap.Logger, client 
client.Client) (*DeviceState, error) { + d.namespace = utils.GetAgentNamespace() + d.logger = logger + d.client = client + return d, nil +} + +// GetNetDevices get all net devices from the node, the attributes of every devices +// should be included but not limited to: +// isRdma, isSriov, gpuAffinity, ipaddress, macaddress, bandwidth +// type(ib/eth),vendor,device, pciAddress, etc. +func (d *DeviceState) GetNetDevices() []resourceapi.Device { + links, err := netlink.LinkList() + if err != nil { + return nil + } + + var devices []resourceapi.Device + for _, link := range links { + isVirtual, err := networking.IsVirtualNetDevice(link.Attrs().Name) + if err != nil { + d.logger.Debug("Failed to check if netdev is virtual device", zap.String("netdev", link.Attrs().Name), zap.Error(err)) + continue + } + // skip virtual device but not vlan type + if isVirtual && (link.Type() != "vlan") { + d.logger.Sugar().Debugf("netdev %s is virtual device, skip add to resource slices", link.Attrs().Name) + continue + } + + isVf := networking.IsSriovVfForNetDev(link.Attrs().Name) + if isVf { + d.logger.Sugar().Debugf("netdev %s is sriov vf, skip to add to resource slices", link.Attrs().Name) + continue + } + devices = append(devices, d.getNetDevice(link)) + } + return devices +} + +func (d *DeviceState) getNetDevice(link netlink.Link) resourceapi.Device { + device := resourceapi.Device{ + Name: link.Attrs().Name, + AllowMultipleAllocations: ptr.To(true), + Attributes: make(map[resourceapi.QualifiedName]resourceapi.DeviceAttribute), + Capacity: make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity), + } + + // make sure the ifname is an valid dns1123 label, if not normalize it + if len(validation.IsDNS1123Label(link.Attrs().Name)) > 0 { + device.Name = NormalizedDNS1123Label(link.Attrs().Name) + d.logger.Sugar().Debugf("iface %s is invalid DNS1123 label, normalized to %s", link.Attrs().Name, device.Name) + } + + d.addBasicAttributesForNetDev(link, device.Attributes) + 
d.addRDMATopoAttributes(link.Attrs().Name, device.Attributes) + // pci attributes + d.addPCIAttributesForNetDev(link.Attrs().Name, device.Attributes) + // bandwidth attributes + d.addBandwidthAttributesForNetDev(link.Attrs().Name, device.Attributes) + d.addSpiderMultusConfigAttributesForNetDev(link.Attrs().Name, device.Attributes) + d.addConsumableCapacityForNetDev(&device) + return device +} + +func (d *DeviceState) addConsumableCapacityForNetDev(device *resourceapi.Device) { + // get sriov vf totalcount + totalVfs, err := networking.GetSriovTotalVfsForNetDev(device.Name) + if err != nil { + d.logger.Error("Failed to get sriov vf count for netdev", zap.String("iface", device.Name), zap.Error(err)) + } + device.Capacity[resourceapi.QualifiedName("capacity.spidernet.io/vfs")] = resourceapi.DeviceCapacity{ + Value: resource.MustParse(fmt.Sprintf("%d", totalVfs)), + } +} + +func (d *DeviceState) addPCIAttributesForNetDev(iface string, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { + // get vendor id, device id and pci address from sysfs + // deviceId, err := networking.GetPciDeviceIdForNetDev(iface) + // if err != nil { + // d.logger.Error("Failed to get PCI deviceId for netdev", zap.String("iface", iface), zap.Error(err)) + // } + // device.Attributes["device"] = resourceapi.DeviceAttribute{StringValue: ptr.To(deviceId)} + + // vendor, err := networking.GetPciVendorForNetDev(iface) + // if err != nil { + // d.logger.Error("Failed to get PCI vendor for netdev", zap.String("iface", iface), zap.Error(err)) + // } + // device.Attributes["vendor"] = resourceapi.DeviceAttribute{StringValue: ptr.To(vendor)} + + // get pci address from sysfs + pciAddress, err := networking.GetPciAddessForNetDev(iface) + if err != nil { + d.logger.Error("Failed to get PCI address for netdev", zap.String("iface", iface), zap.Error(err)) + } + device["pciAddress"] = resourceapi.DeviceAttribute{StringValue: ptr.To(pciAddress)} + + // sriov-related attributes + // first check 
if the netdev is sriov pf or sriov vf
+	isSriovPf, err := networking.IsSriovPfForNetDev(iface)
+	if err != nil {
+		d.logger.Sugar().Debugf("Failed to check if netdev %s is sriov pf: %v", iface, err)
+	}
+	device[resourceapi.QualifiedName("isSriovPf")] = resourceapi.DeviceAttribute{BoolValue: ptr.To(isSriovPf)}
+
+	// if isSriovPf {
+	// 	// get available vf pci addresses
+	// 	availableVfPciAddresses, err := networking.GetSriovAvailableVfPciAddressesForNetDev(iface)
+	// 	if err != nil {
+	// 		d.logger.Error("Failed to get available sriov vf pci addresses for netdev", zap.String("iface", iface), zap.Error(err))
+	// 	}
+	// 	// get available vf count
+	// 	device["availableVfs"] = resourceapi.DeviceAttribute{IntValue: ptr.To(int64(len(availableVfPciAddresses)))}
+	// }
+
+	// device.Attributes["vfPciAddressPrefix"] = resourceapi.DeviceAttribute{StringValue: ptr.To(GetPciAddressPrefix(pciAddress))}
+	// deviceVfList, err := networking.GetVFList(pciAddress)
+	// if err != nil {
+	// 	d.logger.Error("Failed to get sriov vf list for netdev", zap.String("iface", iface), zap.Error(err))
+	// }
+	// // NOTE: spec.devices[5].basic.attributes[vfPciAddresses].string: Too long: may not be more than 64 bytes"
+	// device.Attributes["allVfPciAddressSuffix"] = resourceapi.DeviceAttribute{StringValue: ptr.To(strings.Join(deviceVfList, ","))}
+
+	// // the value Must not be longer than 64 characters
+	// device.Attributes["availableVfPciAddressSuffix"] = resourceapi.DeviceAttribute{StringValue: ptr.To(strings.Join(availableVfPciAddresses, ","))}
+}
+
+func (d *DeviceState) addBasicAttributesForNetDev(link netlink.Link, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) {
+	linkAttrs := link.Attrs()
+	device["linkType"] = resourceapi.DeviceAttribute{StringValue: ptr.To(link.Type())}
+	if link.Type() == "device" {
+		device["linkType"] = resourceapi.DeviceAttribute{StringValue: ptr.To("ethernet")}
+	}
+	device["name"] = resourceapi.DeviceAttribute{StringValue: 
ptr.To(linkAttrs.Name)}
+	device["mtu"] = resourceapi.DeviceAttribute{IntValue: ptr.To(int64(linkAttrs.MTU))}
+	device["state"] = resourceapi.DeviceAttribute{StringValue: ptr.To(linkAttrs.OperState.String())}
+	device["mac"] = resourceapi.DeviceAttribute{StringValue: ptr.To(linkAttrs.HardwareAddr.String())}
+	isRDMA := rdmamap.IsRDmaDeviceForNetdevice(linkAttrs.Name)
+	if isRDMA {
+		d.addGPUAffinityAttributesForNetDev(link.Attrs().Name, device)
+	}
+	device["rdma"] = resourceapi.DeviceAttribute{BoolValue: &isRDMA}
+	d.addIPAddressAttributesForNetDev(link, device)
+}
+
+func (d *DeviceState) addIPAddressAttributesForNetDev(link netlink.Link, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) {
+	addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL)
+	if err != nil {
+		d.logger.Sugar().Errorf("Failed to get addresses for netdev %s: %v", link.Attrs().Name, err)
+		device["ipv4CIDR"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")}
+		device["ipv6CIDR"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")}
+		return
+	}
+
+	for _, addr := range addrs {
+		if addr.IP.IsMulticast() || addr.IP.IsLinkLocalUnicast() {
+			continue
+		}
+
+		// addr.IPNet.String() => 10.6.1.1/24, not 10.6.1.0/24
+		ipNetString := addr.IPNet.String()
+		_, ipnet, err := net.ParseCIDR(ipNetString)
+		if err != nil {
+			d.logger.Sugar().Errorf("Failed to parse CIDR for netdev %s: %v", link.Attrs().Name, err)
+			continue
+		}
+
+		if ipnet.IP.To4() != nil {
+			device["ipv4CIDR"] = resourceapi.DeviceAttribute{StringValue: ptr.To(ipnet.String())}
+		}
+		if ipnet.IP.To4() == nil {
+			device["ipv6CIDR"] = resourceapi.DeviceAttribute{StringValue: ptr.To(ipnet.String())}
+		}
+	}
+}
+
+func (d *DeviceState) addBandwidthAttributesForNetDev(iface string, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) {
+	bandwidth, err := networking.GetNetdevBandwidth(iface)
+	if err != nil {
+		d.logger.Sugar().Debugf("Failed to get bandwidth for netdev %s: %v", iface, err)
+	}
+
+	
device["bandwidthGbps"] = resourceapi.DeviceAttribute{ + StringValue: ptr.To(fmt.Sprintf("%d", bandwidth/1000)), + } +} + +func (d *DeviceState) addRDMATopoAttributes(iface string, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { + device["topoZone"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")} +} + +func (d *DeviceState) addGPUAffinityAttributesForNetDev(iface string, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { + gdrGpus, err := networking.GetGdrGpusForNetDevice(iface) + if err != nil { + d.logger.Sugar().Errorf("Failed to get GDR GPUs for netdev %s: %v", iface, err) + } + device["gdrAffinityGpus"] = resourceapi.DeviceAttribute{StringValue: ptr.To(strings.Join(gdrGpus, ","))} + //device.Attributes["PHBAffinityGpus"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")} + // device.Attributes["SYSAffinityGpus"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")} + // device.Attributes["NODEAffinityGpus"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")} +} + +func (d *DeviceState) addSpiderMultusConfigAttributesForNetDev(iface string, device map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { + // TODO(@cyclinder): spider multus config attributes + var cniConfigs []string + + // Use client interface instead of direct API calls to reduce API pressure + var configList spiderpoolv2beta1.SpiderMultusConfigList + if err := d.client.List(context.Background(), &configList, &client.ListOptions{Namespace: d.namespace}); err != nil { + d.logger.Sugar().Errorf("Failed to list spider multus configs: %v", err) + device["cniConfigs"] = resourceapi.DeviceAttribute{StringValue: ptr.To("")} + return + } + + // Match spider multus config name with netdev name + // e.g. 
enp11s0f0np0-macvlan0, enp11s0f1np1-sriov1 + pattern := regexp.MustCompile(fmt.Sprintf(".*%s.*", regexp.QuoteMeta(iface))) + for _, config := range configList.Items { + if pattern.MatchString(config.Name) { + cniConfigs = append(cniConfigs, config.Name) + } + } + device["cniConfigs"] = resourceapi.DeviceAttribute{StringValue: ptr.To(strings.Join(cniConfigs, ","))} +} diff --git a/pkg/dra/driver.go b/pkg/dra/driver.go new file mode 100644 index 0000000000..9444b7a87d --- /dev/null +++ b/pkg/dra/driver.go @@ -0,0 +1,198 @@ +package dra + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/dra/nri" + "github.com/spidernet-io/spiderpool/pkg/logutils" + "github.com/spidernet-io/spiderpool/pkg/utils" + + "go.uber.org/zap" + + resourcev1 "k8s.io/api/resource/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/dynamic-resource-allocation/kubeletplugin" + "k8s.io/dynamic-resource-allocation/resourceslice" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + kubeletPluginRegistryPath = "/var/lib/kubelet/plugins_registry" + kubeletPluginPath = "/var/lib/kubelet/plugins" +) + +type Driver struct { + nodeName string + logger *zap.Logger + kubeClient kubernetes.Interface + draPlugin *kubeletplugin.Helper + client client.Client + state *DeviceState +} + +// NewDriver creates a new DRA driver. 
+func NewDriver(ctx context.Context, client client.Client, clientSet kubernetes.Interface, enableNri bool) (*Driver, error) { + var err error + d := &Driver{ + logger: logutils.Logger.Named("dra"), + client: client, + state: &DeviceState{}, + } + + nodeName := utils.GetNodeName() + if nodeName == "" { + return nil, fmt.Errorf("env %s is not set", constant.ENV_SPIDERPOOL_NODENAME) + } + d.nodeName = nodeName + + err = os.MkdirAll(constant.DRADriverPluginPath, 0750) + if err != nil { + return nil, fmt.Errorf("failed to create plugin path %s: %v", constant.DRADriverPluginSocketPath, err) + } + + d.state, err = d.state.Init(d.logger, client) + if err != nil { + return nil, err + } + + d.draPlugin, err = kubeletplugin.Start(ctx, + d, + kubeletplugin.NodeName(nodeName), + kubeletplugin.KubeClient(clientSet), + kubeletplugin.DriverName(constant.DRADriverName), + kubeletplugin.RegistrarDirectoryPath(kubeletPluginRegistryPath), + kubeletplugin.PluginDataDirectoryPath(constant.DRADriverPluginPath), + ) + if err != nil { + return nil, err + } + go d.PublishResources(ctx) + + if enableNri { + err = nri.Run(ctx, client, nodeName) + if err != nil { + return nil, err + } + } + + return d, nil +} + +func (d *Driver) PrepareResourceClaims(ctx context.Context, claims []*resourcev1.ResourceClaim) (map[types.UID]kubeletplugin.PrepareResult, error) { + d.logger.Info("PrepareResourceClaims is called", zap.Any("claims", claims)) + nri.GetCache().WarmupNode(ctx, d.client, utils.GetNodeName(), utils.GetAgentNamespace()) + result := make(map[types.UID]kubeletplugin.PrepareResult) + for _, c := range claims { + nri.GetCache().SetResourceClaim(c) + nri.GetCache().IndexPodClaimsFromResourceClaim(c) + result[c.UID] = d.nodePrepareResource(ctx, c) + } + return result, nil +} + +func (d *Driver) UnprepareResourceClaims(ctx context.Context, claims []kubeletplugin.NamespacedObject) (map[types.UID]error, error) { + d.logger.Info("UnprepareResourceClaims is called", zap.Any("claims", claims)) + 
result := make(map[types.UID]error) + for _, c := range claims { + result[c.UID] = d.nodeUnprepareResource(ctx, c) + } + return result, nil +} + +func (d *Driver) HandleError(ctx context.Context, err error, msg string) { + // See: https://pkg.go.dev/k8s.io/apimachinery/pkg/util/runtime#HandleErrorWithContext + runtime.HandleErrorWithContext(ctx, err, msg) +} + +func (d *Driver) nodePrepareResource(ctx context.Context, claim *resourcev1.ResourceClaim) kubeletplugin.PrepareResult { + if claim.Status.Allocation == nil { + return kubeletplugin.PrepareResult{ + Err: fmt.Errorf("resource claim '%s/%s' is not allocated", claim.Namespace, claim.Name), + } + } + + return kubeletplugin.PrepareResult{} +} + +func (d *Driver) nodeUnprepareResource(ctx context.Context, claim kubeletplugin.NamespacedObject) error { + rc := &resourcev1.ResourceClaim{} + if err := d.client.Get(ctx, client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, rc); err == nil { + for _, consumer := range rc.Status.ReservedFor { + if consumer.Resource != "pods" { + continue + } + if consumer.UID != "" { + nri.GetCache().DeletePodClaimIndexByUID(string(consumer.UID)) + } + if consumer.Name != "" { + nri.GetCache().DeletePodClaimIndexByNSName(rc.Namespace, consumer.Name) + } + } + nri.GetCache().DeleteResourceClaim(rc.Namespace, rc.Name) + } + return nil +} + +// PublishResources periodically publishes the available SR-IOV resources +func (d *Driver) PublishResources(ctx context.Context) { + devices := d.state.GetNetDevices() + resources := resourceslice.DriverResources{ + Pools: map[string]resourceslice.Pool{ + "default": { + Slices: []resourceslice.Slice{ + { + Devices: devices, + }, + }, + }, + }, + } + if err := d.draPlugin.PublishResources(ctx, resources); err != nil { + d.logger.Error("failed to publish resources", zap.Error(err)) + } else { + d.logger.Info("Published DRA resources") + } + + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + 
d.logger.Info("receive ctx done, stop publishing resources") + return + case <-ticker.C: + // TODO: we should use netlink.LinkSubscribe to watch any changes of the netlink + // if one device is allocated/deallocated to a pod, we can update the device state in time + // which make sure the same device will not be allocated to different pods + // get the latest state of the netlink + devices := d.state.GetNetDevices() + resources := resourceslice.DriverResources{ + Pools: map[string]resourceslice.Pool{ + "default": { + Slices: []resourceslice.Slice{ + { + Devices: devices, + }, + }, + }, + }, + } + if err := d.draPlugin.PublishResources(ctx, resources); err != nil { + d.logger.Error("failed to publish resources", zap.Error(err)) + } + } + } +} + +func (d *Driver) Stop() { + if d.draPlugin != nil { + d.draPlugin.Stop() + } +} diff --git a/pkg/dra/nri/cache.go b/pkg/dra/nri/cache.go new file mode 100644 index 0000000000..ad8dd3056c --- /dev/null +++ b/pkg/dra/nri/cache.go @@ -0,0 +1,407 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/containernetworking/cni/libcni" + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/spidernet-io/spiderpool/pkg/constant" + resourcev1 "k8s.io/api/resource/v1" + podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Cache struct { + mu sync.RWMutex + + resourceSliceByNode map[string]*resourcev1.ResourceSlice + resourceSliceTS map[string]time.Time + + nadConfigByKey map[string]string + nadTS map[string]time.Time + + confListByKey map[string]*libcni.NetworkConfigList + confListTS map[string]time.Time + + resourceClaimByKey map[string]*resourcev1.ResourceClaim + resourceClaimTS map[string]time.Time + + podClaimsByUID map[string]map[string]struct{} + podClaimsByNSName 
map[string]map[string]struct{} + + podNetworkStatusByUID map[string][]*NetworkStatus + podNetworkStatusTS map[string]time.Time + + podResourcesList *podresourcesapi.ListPodResourcesResponse + podResourcesTS time.Time + + nodeWarmupTS map[string]time.Time +} + +var defaultCache = NewCache() + +func NewCache() *Cache { + return &Cache{ + resourceSliceByNode: map[string]*resourcev1.ResourceSlice{}, + resourceSliceTS: map[string]time.Time{}, + nadConfigByKey: map[string]string{}, + nadTS: map[string]time.Time{}, + confListByKey: map[string]*libcni.NetworkConfigList{}, + confListTS: map[string]time.Time{}, + resourceClaimByKey: map[string]*resourcev1.ResourceClaim{}, + resourceClaimTS: map[string]time.Time{}, + podClaimsByUID: map[string]map[string]struct{}{}, + podClaimsByNSName: map[string]map[string]struct{}{}, + podNetworkStatusByUID: map[string][]*NetworkStatus{}, + podNetworkStatusTS: map[string]time.Time{}, + nodeWarmupTS: map[string]time.Time{}, + } +} + +func claimKey(namespace, name string) string { + return namespace + "/" + name +} + +func podNSNameKey(namespace, name string) string { + return namespace + "/" + name +} + +func (c *Cache) SetResourceClaim(rc *resourcev1.ResourceClaim) { + if rc == nil || rc.Namespace == "" || rc.Name == "" { + return + } + key := claimKey(rc.Namespace, rc.Name) + copy := rc.DeepCopy() + c.mu.Lock() + defer c.mu.Unlock() + c.resourceClaimByKey[key] = copy + c.resourceClaimTS[key] = time.Now() +} + +func (c *Cache) GetResourceClaim(namespace, name string, maxAge time.Duration) (*resourcev1.ResourceClaim, bool) { + key := claimKey(namespace, name) + c.mu.RLock() + rc := c.resourceClaimByKey[key] + ts := c.resourceClaimTS[key] + c.mu.RUnlock() + if rc == nil { + return nil, false + } + if maxAge > 0 && time.Since(ts) > maxAge { + return nil, false + } + return rc, true +} + +func (c *Cache) DeleteResourceClaim(namespace, name string) { + key := claimKey(namespace, name) + c.mu.Lock() + defer c.mu.Unlock() + 
delete(c.resourceClaimByKey, key) + delete(c.resourceClaimTS, key) +} + +func (c *Cache) DeletePodClaimIndexByNSName(podNamespace, podName string) { + if podNamespace == "" || podName == "" { + return + } + key := podNSNameKey(podNamespace, podName) + c.mu.Lock() + defer c.mu.Unlock() + delete(c.podClaimsByNSName, key) +} + +func (c *Cache) DeletePodClaimIndexByUID(podUID string) { + if podUID == "" { + return + } + c.mu.Lock() + defer c.mu.Unlock() + delete(c.podClaimsByUID, podUID) +} + +func (c *Cache) IndexPodClaimsFromResourceClaim(rc *resourcev1.ResourceClaim) { + if rc == nil || rc.Namespace == "" || rc.Name == "" { + return + } + ck := claimKey(rc.Namespace, rc.Name) + c.mu.Lock() + defer c.mu.Unlock() + for _, consumer := range rc.Status.ReservedFor { + if consumer.Resource != "pods" { + continue + } + if consumer.UID != "" { + uid := string(consumer.UID) + if _, ok := c.podClaimsByUID[uid]; !ok { + c.podClaimsByUID[uid] = map[string]struct{}{} + } + c.podClaimsByUID[uid][ck] = struct{}{} + } + if consumer.Name != "" { + pk := podNSNameKey(rc.Namespace, consumer.Name) + if _, ok := c.podClaimsByNSName[pk]; !ok { + c.podClaimsByNSName[pk] = map[string]struct{}{} + } + c.podClaimsByNSName[pk][ck] = struct{}{} + } + } +} + +func (c *Cache) GetPodClaimRefs(podUID, podNamespace, podName string) ([]client.ObjectKey, bool) { + c.mu.RLock() + var set map[string]struct{} + if podUID != "" { + set = c.podClaimsByUID[podUID] + } + if set == nil && podNamespace != "" && podName != "" { + set = c.podClaimsByNSName[podNSNameKey(podNamespace, podName)] + } + c.mu.RUnlock() + if len(set) == 0 { + return nil, false + } + + refs := make([]client.ObjectKey, 0, len(set)) + for ck := range set { + parts := strings.SplitN(ck, "/", 2) + if len(parts) != 2 { + continue + } + refs = append(refs, client.ObjectKey{Namespace: parts[0], Name: parts[1]}) + } + return refs, len(refs) > 0 +} + +func GetCache() *Cache { + return defaultCache +} + +func (c *Cache) 
SetPodNetworkStatus(podUID string, status []*NetworkStatus) { + if podUID == "" { + return + } + c.mu.Lock() + defer c.mu.Unlock() + c.podNetworkStatusByUID[podUID] = status + c.podNetworkStatusTS[podUID] = time.Now() +} + +func (c *Cache) GetPodNetworkStatus(podUID string) ([]*NetworkStatus, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + status, ok := c.podNetworkStatusByUID[podUID] + return status, ok +} + +func (c *Cache) DeletePod(podUID string) { + if podUID == "" { + return + } + c.mu.Lock() + defer c.mu.Unlock() + delete(c.podNetworkStatusByUID, podUID) + delete(c.podNetworkStatusTS, podUID) + delete(c.podClaimsByUID, podUID) +} + +func (c *Cache) GetPodResourcesList(maxAge time.Duration) (*podresourcesapi.ListPodResourcesResponse, bool) { + c.mu.RLock() + resp := c.podResourcesList + ts := c.podResourcesTS + c.mu.RUnlock() + if resp == nil { + return nil, false + } + if maxAge > 0 && time.Since(ts) > maxAge { + return nil, false + } + return resp, true +} + +func (c *Cache) SetPodResourcesList(resp *podresourcesapi.ListPodResourcesResponse) { + if resp == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + c.podResourcesList = resp + c.podResourcesTS = time.Now() +} + +func (c *Cache) GetResourceSlice(nodeName string, maxAge time.Duration) (*resourcev1.ResourceSlice, bool) { + if nodeName == "" { + return nil, false + } + c.mu.RLock() + rs := c.resourceSliceByNode[nodeName] + ts := c.resourceSliceTS[nodeName] + c.mu.RUnlock() + if rs == nil { + return nil, false + } + if maxAge > 0 && time.Since(ts) > maxAge { + return nil, false + } + return rs, true +} + +func (c *Cache) SetResourceSlice(nodeName string, rs *resourcev1.ResourceSlice) { + if nodeName == "" || rs == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + c.resourceSliceByNode[nodeName] = rs + c.resourceSliceTS[nodeName] = time.Now() +} + +func nadKey(namespace, name string) string { + return namespace + "/" + name +} + +func confListKey(namespace, name, deviceID string) string { + 
return fmt.Sprintf("%s/%s@%s", namespace, name, deviceID) +} + +func (c *Cache) GetNADConfig(namespace, name string, maxAge time.Duration) (string, bool) { + key := nadKey(namespace, name) + c.mu.RLock() + cfg, ok := c.nadConfigByKey[key] + ts := c.nadTS[key] + c.mu.RUnlock() + if !ok { + return "", false + } + if maxAge > 0 && time.Since(ts) > maxAge { + return "", false + } + return cfg, true +} + +func (c *Cache) SetNADConfig(namespace, name, cfg string) { + if namespace == "" || name == "" || cfg == "" { + return + } + key := nadKey(namespace, name) + c.mu.Lock() + defer c.mu.Unlock() + c.nadConfigByKey[key] = cfg + c.nadTS[key] = time.Now() +} + +func (c *Cache) GetConfList(namespace, name, deviceID string, maxAge time.Duration) (*libcni.NetworkConfigList, bool) { + key := confListKey(namespace, name, deviceID) + c.mu.RLock() + conf := c.confListByKey[key] + ts := c.confListTS[key] + c.mu.RUnlock() + if conf == nil { + return nil, false + } + if maxAge > 0 && time.Since(ts) > maxAge { + return nil, false + } + return conf, true +} + +func (c *Cache) SetConfList(namespace, name, deviceID string, conf *libcni.NetworkConfigList) { + if namespace == "" || name == "" || deviceID == "" || conf == nil { + return + } + key := confListKey(namespace, name, deviceID) + c.mu.Lock() + defer c.mu.Unlock() + c.confListByKey[key] = conf + c.confListTS[key] = time.Now() +} + +func (c *Cache) WarmupNode(ctx context.Context, k8sClient client.Client, nodeName, nadNamespace string) { + if k8sClient == nil || nodeName == "" || nadNamespace == "" { + return + } + + c.mu.RLock() + last := c.nodeWarmupTS[nodeName] + c.mu.RUnlock() + if time.Since(last) < 10*time.Second { + return + } + + rs, err := getResourceSliceByNode(ctx, k8sClient, nodeName) + if err != nil { + return + } + c.SetResourceSlice(nodeName, rs) + + cniConfigNames := extractCniConfigNamesFromResourceSlice(rs) + for _, name := range cniConfigNames { + nad := &netv1.NetworkAttachmentDefinition{} + if err := 
k8sClient.Get(ctx, client.ObjectKey{Namespace: nadNamespace, Name: name}, nad); err != nil { + continue + } + if nad.Spec.Config == "" { + continue + } + c.SetNADConfig(nadNamespace, name, nad.Spec.Config) + } + + c.mu.Lock() + c.nodeWarmupTS[nodeName] = time.Now() + c.mu.Unlock() +} + +func getResourceSliceByNode(ctx context.Context, k8sClient client.Client, nodeName string) (*resourcev1.ResourceSlice, error) { + fieldSelector := client.MatchingFields(map[string]string{ + resourcev1.ResourceSliceSelectorNodeName: nodeName, + resourcev1.ResourceSliceSelectorDriver: constant.DRADriverName, + }) + + rsList := &resourcev1.ResourceSliceList{} + if err := k8sClient.List(ctx, rsList, fieldSelector); err != nil { + return nil, err + } + if len(rsList.Items) == 0 { + return nil, fmt.Errorf("no ResourceSlice found for node %s", nodeName) + } + return &rsList.Items[0], nil +} + +func extractCniConfigNamesFromResourceSlice(rs *resourcev1.ResourceSlice) []string { + if rs == nil { + return nil + } + + seen := map[string]struct{}{} + var names []string + for _, dev := range rs.Spec.Devices { + if dev.Attributes == nil { + continue + } + if !IsReadyRdmaResourceDevice(dev) { + continue + } + cniConfigsStr := GetStringValueForAttributes("cniConfigs", dev.Attributes) + if cniConfigsStr == "" { + continue + } + for _, n := range strings.Split(cniConfigsStr, ",") { + n = strings.TrimSpace(n) + if n == "" { + continue + } + if _, ok := seen[n]; ok { + continue + } + seen[n] = struct{}{} + names = append(names, n) + } + } + return names +} diff --git a/pkg/dra/nri/cni.go b/pkg/dra/nri/cni.go new file mode 100644 index 0000000000..65b3c97395 --- /dev/null +++ b/pkg/dra/nri/cni.go @@ -0,0 +1,166 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/containerd/nri/pkg/api" + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + 
cni100 "github.com/containernetworking/cni/pkg/types/100" +) + +type CNIConfig struct { + DeviceID string `json:"deviceID"` + PicBusID string `json:"picBusID"` + ConfList cnitypes.NetConfList + RawBytes []byte +} + +type NetworkStatus struct { + Name string `json:"name"` + Device string `json:"device,omitempty"` + Interface string `json:"interface"` + IPs []string `json:"ips"` + Mac string `json:"mac"` + Gateway []string `json:"gateway"` + DeviceInfo *DeviceInfo `json:"deviceInfo"` +} + +type DeviceInfo struct { + PciAddress string `json:"pciAddress,omitempty"` + RdmaDevice string `json:"rdma-device,omitempty"` + RdmaCharDevices []string `json:"rdma-char-device,omitempty"` +} + +func (ns *NetworkStatus) parseNetworkStatus(result *cni100.Result) { + if result == nil { + return + } + + indexToIPs := make(map[int]*cni100.IPConfig, len(result.IPs)) + for _, ips := range result.IPs { + indexToIPs[*ips.Interface] = ips + } + + for index, i := range result.Interfaces { + ips := indexToIPs[index] + if ips == nil { + continue + } + ns.Interface = i.Name + ns.IPs = append(ns.IPs, ips.Address.String()) + ns.Mac = i.Mac + if ips.Gateway != nil { + ns.Gateway = append(ns.Gateway, ips.Gateway.String()) + } + } +} + +func cniAdd(ctx context.Context, confList *libcni.NetworkConfigList, rc libcni.RuntimeConf) (cnitypes.Result, error) { + cniConfig := libcni.NewCNIConfigWithCacheDir([]string{defaultCniBinPath}, defaultCniResultCacheDir, nil) + + result, err := cniConfig.AddNetworkList(ctx, confList, &rc) + if err != nil { + return nil, fmt.Errorf("error adding network list: %v", err) + } + + return result, nil +} + +func cniDel(ctx context.Context, confList *libcni.NetworkConfigList, rc libcni.RuntimeConf) error { + cniConfig := libcni.NewCNIConfigWithCacheDir([]string{defaultCniBinPath}, defaultCniResultCacheDir, nil) + + if err := cniConfig.DelNetworkList(ctx, confList, &rc); err != nil { + return fmt.Errorf("error deleting network list: %v", err) + } + + return nil +} + +func 
buildSecondaryCniConfig(netName, config string, vfDeviceId string) (*libcni.NetworkConfigList, error) { + if vfDeviceId == "" || netName == "" || config == "" { + return nil, fmt.Errorf("vfDeviceId or netName or config is empty") + } + + data, err := appendDeviceIDInCNIConfig(netName, vfDeviceId, config) + if err != nil { + return nil, fmt.Errorf("error appending device ID to raw CNI config bytes: %v", err) + } + + confList, err := libcni.ConfListFromBytes(data) + if err != nil { + return nil, fmt.Errorf("error converting the raw bytes into a config: %v", err) + } + + return confList, nil +} + +func appendDeviceIDInCNIConfig(netName, vfDeviceId, cniConfig string) ([]byte, error) { + if netName == "" || vfDeviceId == "" || cniConfig == "" { + return nil, fmt.Errorf("netName or vfDeviceId or cniConfig is empty") + } + + var rawConfig map[string]interface{} + var err error + + err = json.Unmarshal([]byte(cniConfig), &rawConfig) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal raw CNI config: %v", err) + } + + // Inject device ID + pList, ok := rawConfig["plugins"] + if !ok { + return nil, fmt.Errorf("failed to get plugin list") + } + + pMap, ok := pList.([]interface{}) + if !ok { + return nil, fmt.Errorf("failed to typecast plugin list") + } + + for idx, plugin := range pMap { + currentPlugin, ok := plugin.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("failed to typecast plugin #%d", idx) + } + // Inject deviceID + currentPlugin["deviceID"] = vfDeviceId + currentPlugin["pciBusID"] = vfDeviceId + } + + // Inject network name if missing from Config for the thick plugin case + if n, ok := rawConfig["name"]; !ok || n == "" { + rawConfig["name"] = netName + } + + data, err := json.Marshal(rawConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal raw CNI config: %v", err) + } + + return data, nil +} + +func buildRuntimeConfig(pod *api.PodSandbox, deviceInfo DeviceInfo, nicName string, podNetNs string) libcni.RuntimeConf { + 
capabilityArgs := map[string]any{} + capabilityArgs["deviceID"] = deviceInfo.PciAddress + + return libcni.RuntimeConf{ + ContainerID: pod.Uid, + NetNS: podNetNs, + IfName: nicName, + Args: [][2]string{ + {"IgnoreUnknown", "true"}, + {"K8S_POD_NAMESPACE", pod.Namespace}, + {"K8S_POD_NAME", pod.Name}, + {"K8S_POD_INFRA_CONTAINER_ID", pod.Id}, + {"K8S_POD_UID", pod.Uid}, + }, + CapabilityArgs: capabilityArgs, + } +} diff --git a/pkg/dra/nri/gpu.go b/pkg/dra/nri/gpu.go new file mode 100644 index 0000000000..3c677f49a6 --- /dev/null +++ b/pkg/dra/nri/gpu.go @@ -0,0 +1,262 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "context" + "slices" + "sort" + "strings" + + "github.com/containerd/nri/pkg/api" + "go.uber.org/zap" + resourcev1 "k8s.io/api/resource/v1" + podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" +) + +const ( + NvidiaGPU = iota + // More GPU vendor +) + +var ( + NvidiaGPUResourceName = "nvidia.com" + NvidiaDriverGPUPath = "/proc/driver/nvidia/gpus" +) + +type networkSupport struct { + devName string + gpuCount int + gpus map[string]struct{} +} + +func (n *nriPlugin) getAllocatedGpusForPodSandbox(ctx context.Context, pod *api.PodSandbox) (gpus []string, err error) { + n.logger.Debug("Getting allocated GPUs for pod", zap.String("podID", pod.GetId())) + + // It shoule be better use Get function here, but we should enable the kubelet feature-gate + // "KubeletPodResourcesGetAllocatable"(alpha in 1.27). 
+ // podResources, err := n.kubeletClient.Get(ctx, &podresourcesapi.GetPodResourcesRequest{ + // PodName: pod.GetName(), + // PodNamespace: pod.GetNamespace(), + // })\ + resp, err := n.kubeletClient.List(ctx, &podresourcesapi.ListPodResourcesRequest{}) + if err != nil { + n.logger.Error("Failed to get pod resource map", zap.Error(err)) + return + } + + for _, r := range resp.PodResources { + if r.Name == pod.Name && r.Namespace == pod.Namespace { + return n.getPodAllocatedGpuResources(pod, r) + } + } + + // return if no any resources allocated + return +} + +func (n *nriPlugin) getPodAllocatedGpuResources(sandbox *api.PodSandbox, PodResources *podresourcesapi.PodResources) ([]string, error) { + var gpuType int + var deviceUUIDs []string + + for _, c := range PodResources.Containers { + for _, dev := range c.Devices { + // TODO(@cyclinder): more GPU vendor + if strings.HasPrefix(dev.ResourceName, NvidiaGPUResourceName) { + // Found Nvidia GPU Resources + gpuType = NvidiaGPU + deviceUUIDs = append(deviceUUIDs, dev.DeviceIds...) 
+ } + } + } + + if len(deviceUUIDs) == 0 { + return []string{}, nil + } + + var gpusDevicePciAddr []string + switch gpuType { + case NvidiaGPU: + n.logger.Debug("NVIDIA GPU resources allocated to pod", + zap.Strings("gpuUUIDs", deviceUUIDs), + zap.String("podName", sandbox.GetName()), + zap.String("namespace", sandbox.GetNamespace())) + + allNvidiaGpuMap, err := GetAllNvidiaGpusMap() + if err != nil { + n.logger.Warn("Failed to get GPU map", zap.Error(err)) + } + + for _, uuid := range deviceUUIDs { + if allNvidiaGpuMap[uuid] != "" { + gpusDevicePciAddr = append(gpusDevicePciAddr, allNvidiaGpuMap[uuid]) + } + } + } + + return gpusDevicePciAddr, nil +} + +// filterPfToCniConfigsWithGpuRdmaAffinity filters the CNI configs for the given GPUs, return the pf name to cni config map +func filterPfToCniConfigsWithGpuRdmaAffinity(gpus []string, resourceSlice *resourcev1.ResourceSlice) map[string]string { + if len(gpus) == 0 { + return nil + } + + // Map to track network configurations found for each GPU + gpuNetworkMap := make(map[string][]string) + // Map to track which GPUs each network interface supports + networkGpuMap := make(map[string]map[string]struct{}) + // Map to store device name to CNI config mapping + deviceNameToCniConfig := make(map[string]string) + + // Step 1: Collect all available network interface CNI configurations for each GPU + for _, dev := range resourceSlice.Spec.Devices { + if dev.Attributes == nil { + continue + } + + if !IsReadyRdmaResourceDevice(dev) { + continue + } + + // Get CNI configuration for this network interface + // cniConfigsStr maybe be more than one + cniConfigsStr := GetStringValueForAttributes("cniConfigs", dev.Attributes) + if cniConfigsStr == "" { + continue + } + + // Get GPU affinity for this network interface + gpusInAttribute := GetStringValueForAttributes("gdrAffinityGpus", dev.Attributes) + if gpusInAttribute == "" { + continue + } + + // Store device name to CNI config mapping + deviceNameToCniConfig[dev.Name] = 
cniConfigsStr + + // Initialize the map for this network interface if not already done + if _, exists := networkGpuMap[dev.Name]; !exists { + networkGpuMap[dev.Name] = make(map[string]struct{}) + } + + // Check if each requested GPU has affinity with this network interface + for _, gpu := range gpus { + if strings.Contains(gpusInAttribute, gpu) { + // Add this network interface's name to the corresponding GPU's config list + gpuNetworkMap[gpu] = append(gpuNetworkMap[gpu], dev.Name) + // Record that this network interface supports this GPU + networkGpuMap[dev.Name][gpu] = struct{}{} + } + } + } + + // Result map: network interface name -> CNI config + result := make(map[string]string) + + // Step 2: Check if any network interface supports all GPUs + for devName, supportedGpus := range networkGpuMap { + if len(supportedGpus) == len(gpus) { + // This network interface supports all GPUs + allGpusSupported := true + for _, gpu := range gpus { + if _, exists := supportedGpus[gpu]; !exists { + allGpusSupported = false + break + } + } + if allGpusSupported { + // Return a map with just this network interface + result[devName] = deviceNameToCniConfig[devName] + return result + } + } + } + + // Step 3: If no network interface supports all GPUs, we need to find a combination of networks + // that can cover all GPUs with minimal number of networks + + // First, try to find networks that support multiple GPUs + var coveredGpus = make(map[string]struct{}) + var selectedDevices = make(map[string]struct{}) + + // Sort networks by the number of GPUs they support (descending) + var networkSupports []networkSupport + for devName, supportedGpus := range networkGpuMap { + networkSupports = append(networkSupports, networkSupport{ + devName: devName, + gpuCount: len(supportedGpus), + gpus: supportedGpus, + }) + } + + // Sort by GPU count descending + sort.Slice(networkSupports, func(i, j int) bool { + return networkSupports[i].gpuCount > networkSupports[j].gpuCount + }) + + // Greedily 
select networks that cover the most uncovered GPUs + for len(coveredGpus) < len(gpus) && len(networkSupports) > 0 { + // Find the network that covers the most uncovered GPUs + bestIdx := -1 + bestNewCoverage := 0 + + for i, ns := range networkSupports { + // Count how many new GPUs this network would cover + newCoverage := 0 + for gpu := range ns.gpus { + if _, covered := coveredGpus[gpu]; !covered { + newCoverage++ + } + } + + if newCoverage > bestNewCoverage { + bestNewCoverage = newCoverage + bestIdx = i + } + } + + // If we couldn't find a network that covers new GPUs, break + if bestIdx == -1 || bestNewCoverage == 0 { + break + } + + // Add the selected network + selected := networkSupports[bestIdx] + if _, exists := selectedDevices[selected.devName]; !exists { + selectedDevices[selected.devName] = struct{}{} + result[selected.devName] = deviceNameToCniConfig[selected.devName] + } + + // Mark the GPUs as covered + for gpu := range selected.gpus { + coveredGpus[gpu] = struct{}{} + } + + // Remove the selected network from consideration + networkSupports = slices.Delete(networkSupports, bestIdx, bestIdx+1) + } + + // If we've covered all GPUs, return the selected configs + if len(coveredGpus) == len(gpus) { + return result + } + + // Step 4: If no single network interface can support all GPUs, select one for each GPU + for _, gpu := range gpus { + devNames, found := gpuNetworkMap[gpu] + if !found || len(devNames) == 0 { + continue + } + + // Add the first configuration if not already added + devName := devNames[0] + if _, exists := selectedDevices[devName]; !exists { + selectedDevices[devName] = struct{}{} + result[devName] = deviceNameToCniConfig[devName] + } + } + + return result +} diff --git a/pkg/dra/nri/gpu_test.go b/pkg/dra/nri/gpu_test.go new file mode 100644 index 0000000000..8a523cb0ec --- /dev/null +++ b/pkg/dra/nri/gpu_test.go @@ -0,0 +1,286 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( 
+ . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + resourcev1 "k8s.io/api/resource/v1" + "k8s.io/utils/ptr" +) + +var _ = Describe("filterPfToCniConfigsWithGpuRdmaAffinity", func() { + + Context("empty gpus", func() { + It("should return nil", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{}, + }, + }) + Expect(got).To(BeNil()) + }) + }) + + Context("no matching devices", func() { + It("should return nil", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:04:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + }, + }, + }) + Expect(len(got)).To(Equal(0)) + }) + + It("no rdma network nic found", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(false)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + }, + }, + }) + Expect(len(got)).To(Equal(0)) + }) + }) + + Context("single gpu matched with single network", func() { + It("should return the one network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: 
map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:08:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(1)) + Expect(got).To(HaveKeyWithValue("ens33", "ens33-sriov1")) + }) + }) + + Context("single gpu matched multi networks nic", func() { + It("should return only first network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(1)) + Expect(got).To(HaveKeyWithValue("ens33", "ens33-sriov1")) + }) + }) + + Context("multiple gpus only matched with single networks", func() { + It("should return the matched network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0", "0000:08:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: 
map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0,0000:08:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:10:00.0,0000:12:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(1)) + Expect(got).To(HaveKeyWithValue("ens33", "ens33-sriov1")) + }) + }) + + Context("multiple gpus with shared network", func() { + It("if all gpus is matched one network, should return the network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0", "0000:08:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0,0000:08:00.0")}, + }, + }, + { + Name: "ens35", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens35-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:08:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(1)) + Expect(got).To(HaveKeyWithValue("ens34", 
"ens34-sriov1")) + }) + + It("two gpus matched with one networks, one gpu matched with another network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0", "0000:08:00.0", "0000:10:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0,0000:08:00.0")}, + }, + }, + { + Name: "ens35", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens35-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:08:00.0")}, + }, + }, + { + Name: "ens36", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens36-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:10:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(2)) + Expect(got).To(HaveKeyWithValue("ens34", "ens34-sriov1")) + Expect(got).To(HaveKeyWithValue("ens36", "ens36-sriov1")) + }) + + It("every gpu matched with one network", func() { + got := filterPfToCniConfigsWithGpuRdmaAffinity([]string{"0000:06:00.0", "0000:08:00.0"}, &resourcev1.ResourceSlice{ + Spec: resourcev1.ResourceSliceSpec{ + Devices: []resourcev1.Device{ + { + Name: "ens33", + Attributes: 
map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens33-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:06:00.0")}, + }, + }, + { + Name: "ens34", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens34-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:08:00.0")}, + }, + }, + { + Name: "ens35", + Attributes: map[resourcev1.QualifiedName]resourcev1.DeviceAttribute{ + "state": {StringValue: ptr.To("up")}, + "rdma": {BoolValue: ptr.To(true)}, + "cniConfigs": {StringValue: ptr.To("ens35-sriov1")}, + "gdrAffinityGpus": {StringValue: ptr.To("0000:10:00.0")}, + }, + }, + }, + }, + }) + Expect(got).To(HaveLen(2)) + Expect(got).To(HaveKeyWithValue("ens34", "ens34-sriov1")) + Expect(got).To(HaveKeyWithValue("ens33", "ens33-sriov1")) + }) + }) + +}) diff --git a/pkg/dra/nri/nri.go b/pkg/dra/nri/nri.go new file mode 100644 index 0000000000..72f1fe0046 --- /dev/null +++ b/pkg/dra/nri/nri.go @@ -0,0 +1,822 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/spidernet-io/spiderpool/pkg/logutils" + "github.com/spidernet-io/spiderpool/pkg/networking/networking" + "github.com/spidernet-io/spiderpool/pkg/utils" + + "github.com/Mellanox/rdmamap" + "github.com/containerd/nri/pkg/api" + "github.com/containerd/nri/pkg/stub" + "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/vishvananda/netlink" + + "go.uber.org/zap" + "google.golang.org/grpc" + + "github.com/containernetworking/cni/libcni" + cni100 "github.com/containernetworking/cni/pkg/types/100" + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + corev1 
"k8s.io/api/core/v1" + resourcev1 "k8s.io/api/resource/v1" + "k8s.io/client-go/util/retry" + podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + _ stub.RunPodInterface = (*nriPlugin)(nil) + _ stub.StopPodInterface = (*nriPlugin)(nil) + _ stub.CreateContainerInterface = (*nriPlugin)(nil) +) + +var ( + defaultCniResultCacheDir = "/var/lib/spidernet/nri" + defaultCniBinPath = "/opt/cni/bin" +) + +type nriPlugin struct { + nodeName string + spiderpoolNamespace string + cniBinPath string + gpuResourceNames map[string]struct{} + logger *zap.Logger + nri stub.Stub + kubeletClient podresourcesapi.PodResourcesListerClient + conn *grpc.ClientConn + client client.Client +} + +func Run(ctx context.Context, client client.Client, nodeName string) error { + // Check RDMA namespace mode, ensure it is "exclusive" mode + rdmaNsMode, err := netlink.RdmaSystemGetNetnsMode() + if err != nil { + return fmt.Errorf("failed to get RDMA namespace mode: %v", err) + } + if rdmaNsMode != "exclusive" { + return fmt.Errorf("NRI plugin must work in exclusive RDMA namespace mode, current mode: %s", rdmaNsMode) + } + + n := &nriPlugin{ + nodeName: nodeName, + spiderpoolNamespace: utils.GetAgentNamespace(), + logger: logutils.Logger.Named("nri"), + gpuResourceNames: make(map[string]struct{}), + client: client, + } + // register the NRI plugin + nriOpts := []stub.Option{ + stub.WithPluginName(constant.DRADriverName), + stub.WithPluginIdx("00"), + } + stub, err := stub.New(n, nriOpts...) 
+ if err != nil { + return fmt.Errorf("failed to create plugin stub: %v", err) + } + n.nri = stub + + n.kubeletClient, n.conn, err = GetKubeletResourceClient() + if err != nil { + return err + } + + // TODO: make it configuiretable + n.gpuResourceNames[NvidiaGPUResourceName] = struct{}{} + + go func() { + if err = n.nri.Run(ctx); err != nil { + n.logger.Error("failed to start nri plugin", zap.Error(err)) + n.nri.Stop() + n.conn.Close() + } + }() + + return nil +} + +func hasNriDeviceClassInClaim(rc *resourcev1.ResourceClaim) bool { + if rc == nil { + return false + } + for _, req := range rc.Spec.Devices.Requests { + if req.Exactly != nil && req.Exactly.DeviceClassName == constant.DRANRIDeviceClass { + return true + } + for _, sub := range req.FirstAvailable { + if sub.DeviceClassName == constant.DRANRIDeviceClass { + return true + } + } + } + return false +} + +func (n *nriPlugin) RunPodSandbox(ctx context.Context, sandbox *api.PodSandbox) error { + l := n.logger.With(zap.String("NRI Hook", "RunPodSandbox"), zap.String("podName", sandbox.Name), zap.String("namespace", sandbox.Namespace)) + + isHostNetwork := true + for _, namespace := range sandbox.Linux.Namespaces { + if namespace.Type == "network" { + isHostNetwork = false + } + } + + if isHostNetwork { + l.Debug("Skip setup DRA network for hostNetwork pod") + return nil + } + + isContinueFromCache := false + if claimRefs, ok := GetCache().GetPodClaimRefs(sandbox.Uid, sandbox.Namespace, sandbox.Name); ok { + foundCachedClaim := false + for _, ref := range claimRefs { + if rc, ok := GetCache().GetResourceClaim(ref.Namespace, ref.Name, 5*time.Minute); ok { + foundCachedClaim = true + if hasNriDeviceClassInClaim(rc) { + isContinueFromCache = true + break + } + } + } + if foundCachedClaim && !isContinueFromCache { + l.Debug("Pod has resource claims but none require NRI device class, ignore") + return nil + } + } + // Check if devices have already been allocated for this pod + // If devices have already been 
allocated for this pod, skip allocation + k8sPod := &corev1.Pod{} + if err := n.client.Get(ctx, client.ObjectKey{Name: sandbox.GetName(), Namespace: sandbox.GetNamespace()}, k8sPod); err != nil { + l.Error("Failed to get pod", zap.Error(err)) + return err + } + + if len(k8sPod.Spec.ResourceClaims) == 0 { + l.Debug("Pod has no resource claim configured, ignore") + return nil + } + + l.Debug("Start to setup dra network") + // Continue with device allocation for the first container + gpus, err := n.getAllocatedGpusForPodSandbox(ctx, sandbox) + if err != nil { + l.Error("Failed to get allocated gpus", zap.Error(err)) + return err + } + + if len(gpus) == 0 { + // no GPU allocated to this pod + n.logger.Error("No GPU resources allocated to this pod, error to setup dra network", + zap.String("podName", sandbox.GetName()), + zap.String("namespace", sandbox.GetNamespace())) + return fmt.Errorf("error to setup dra network: no GPU resources allocated to this pod") + } + + if !isContinueFromCache { + var resourceClaimName string + for _, rc := range k8sPod.Spec.ResourceClaims { + if rc.ResourceClaimTemplateName != nil && *rc.ResourceClaimTemplateName != "" { + resourceClaimName = *rc.ResourceClaimTemplateName + } + if rc.ResourceClaimName != nil && *rc.ResourceClaimName != "" { + resourceClaimName = *rc.ResourceClaimName + } + } + + if resourceClaimName == "" { + // no resource claim allocated to this pod + return nil + } + + rct := &resourcev1.ResourceClaimTemplate{} + if err := n.client.Get(ctx, client.ObjectKey{Name: resourceClaimName, Namespace: k8sPod.Namespace}, rct); err != nil { + return err + } + + isContinue := false + for _, req := range rct.Spec.Spec.Devices.Requests { + if req.Exactly != nil && req.Exactly.DeviceClassName == constant.DRANRIDeviceClass { + isContinue = true + break + } + for _, sub := range req.FirstAvailable { + if sub.DeviceClassName == constant.DRANRIDeviceClass { + isContinue = true + break + } + } + if isContinue { + break + } + } + + if 
!isContinue { + return nil + } + } + + l.Info("Start to setup DRA network for pod") + resourceSlice, err := n.getResourceSliceByNode(ctx) + if err != nil { + n.logger.Error("Failed to get resource slice", zap.Error(err)) + return err + } + + deviceToCniConfigs := filterPfToCniConfigsWithGpuRdmaAffinity(gpus, resourceSlice) + if len(deviceToCniConfigs) == 0 { + l.Info("No matched CNI configs with GPU Affinity, Ignore setup dra network") + return nil + } + + l.Debug("Found matched CNI configs with GPU Affinity", zap.Any("deviceToCniConfigs", deviceToCniConfigs)) + status, err := n.initPodRdmaNetwork(ctx, l, deviceToCniConfigs, sandbox) + if err != nil || len(status) == 0 { + l.Error("Failed to set pod network with gpu affinity", zap.Error(err)) + return err + } + + GetCache().SetPodNetworkStatus(sandbox.Uid, status) + n.updateResourceClaimNetworkData(ctx, l, sandbox, status) + + statusJSON, err := json.Marshal(status) + if err != nil { + l.Error("Failed to marshal network status", zap.Error(err)) + return fmt.Errorf("failed to marshal network status: %v", err) + } + + l.Info("Successfully Setup Pod RDMA Network, Update the status to Pod annotations") + // Serialize netStatus to JSON string and update pod annotations in Kubernetes + if err = n.updatePodNetworkStatus(ctx, l, string(statusJSON), k8sPod); err != nil { + l.Error("Failed to update pod network status", zap.Error(err)) + return err + } + l.Debug("Successfully update pod network status to annotations", zap.Any("netStatus", status)) + return nil +} + +func (n *nriPlugin) CreateContainer(ctx context.Context, sandbox *api.PodSandbox, container *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) { + l := n.logger.With(zap.String("NRI Hook", "CreateContainer"), zap.String("pod", fmt.Sprintf("%s/%s", sandbox.GetNamespace(), sandbox.GetName())), zap.String("container", container.Name)) + var netStatus []*NetworkStatus + if cached, ok := GetCache().GetPodNetworkStatus(sandbox.Uid); ok { + 
netStatus = cached + } else { + k8sPod := &corev1.Pod{} + if err := n.client.Get(ctx, client.ObjectKey{Name: sandbox.GetName(), Namespace: sandbox.GetNamespace()}, k8sPod); err != nil { + l.Error("Failed to get pod", zap.Error(err)) + return nil, nil, err + } + if status, ok := k8sPod.Annotations[constant.AnnoDRAPodNetworkStatus]; !ok && len(status) == 0 { + l.Info("No DRA network status found for pod annotations, skip mount RDMA devices") + return nil, nil, nil + } + ns, err := n.getNetworkStatusFromPodAnnotations(sandbox, k8sPod) + if err != nil { + l.Error("Failed to get network status from pod annotations", zap.Error(err)) + return nil, nil, err + } + netStatus = ns + } + + if len(netStatus) == 0 { + l.Info("No DRA network status found for pod annotations, skip mount RDMA devices") + return nil, nil, nil + } + + // Convert the allocated RDMA devices to mounts + // mounts := n.parseRDMACharDevicesToMounts(netStatus) + deviceNodes, linuxDeviceCgroup, err := n.parseLinuxDeviceNode(netStatus) + if err != nil { + l.Error("Failed to parse device node", zap.Error(err)) + return nil, nil, err + } + l.Info("Successfully get device node and RDMA char devices, Containerd NRI Hook will adjust these devices in CreateContainer", + zap.Any("deviceNodes", deviceNodes)) + + return &api.ContainerAdjustment{ + Linux: &api.LinuxContainerAdjustment{ + Devices: deviceNodes, + Resources: &api.LinuxResources{ + Devices: linuxDeviceCgroup, + }, + }, + }, nil, nil +} + +func (n *nriPlugin) StopPodSandbox(ctx context.Context, pod *api.PodSandbox) error { + l := n.logger.With(zap.String("NRI Hook", "StopPodSandbox"), zap.String("pod", fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName()))) + var netStatus []*NetworkStatus + if cached, ok := GetCache().GetPodNetworkStatus(pod.Uid); ok { + netStatus = cached + } else { + k8sPod := &corev1.Pod{} + if err := n.client.Get(ctx, client.ObjectKey{Name: pod.Name, Namespace: pod.Namespace}, k8sPod); err != nil { + l.Error("Failed to get pod", 
zap.Error(err)) + return nil + } + if _, ok := k8sPod.Annotations["dra.spidernet.io/nri"]; !ok && len(k8sPod.Spec.ResourceClaims) == 0 { + l.Info("The pod is not using DRA network, ignore invoke CNI DEL") + return nil + } + l.Debug("The pod is using DRA network, Start to invoke CNI DEL") + ns, err := n.getNetworkStatusFromPodAnnotations(pod, k8sPod) + if err != nil { + l.Error("Failed to get network status from Pod annotations", zap.Error(err)) + return nil + } + netStatus = ns + } + + if len(netStatus) == 0 { + l.Info("No network status found for pod, ignore invoke CNI DEL") + return nil + } + + podNetNs, err := n.getPodNetworkNamespace(pod) + if err != nil { + l.Error("Failed to get pod network namespace", zap.Error(err)) + return fmt.Errorf("failed to get pod network namespace: %v", err) + } + + for _, status := range netStatus { + if status.Name == "" || status.DeviceInfo == nil { + l.Error("Invalid network status entry", zap.Any("status", status)) + continue + } + + confList, err := n.loadCniConfig(ctx, l, status.Name, status.DeviceInfo.PciAddress) + if err != nil { + l.Error("Failed to load CNI config", zap.Error(err)) + continue + } + + l.Debug("Got final CNI config, Start invoke CNI DEL", zap.Any("confList", confList)) + rc := buildRuntimeConfig(pod, DeviceInfo{PciAddress: status.DeviceInfo.PciAddress}, status.Interface, podNetNs) + if err := cniDel(ctx, confList, rc); err != nil { + l.Error("Failed to delete pod network", zap.Error(err)) + continue + } + } + + l.Info("successfully invoke CNI DEL in StopPodSandbox") + GetCache().DeletePod(pod.Uid) + return nil +} + +func (n *nriPlugin) RemovePodSandbox(ctx context.Context, pod *api.PodSandbox) error { + return nil +} + +func (n *nriPlugin) Synchronize(_ context.Context, pods []*api.PodSandbox, containers []*api.Container) ([]*api.ContainerUpdate, error) { + return nil, nil +} + +func (n *nriPlugin) Shutdown(_ context.Context) { + n.logger.Info("NRI plugin shutting down...") +} + +func (n *nriPlugin) 
initPodRdmaNetwork(ctx context.Context, l *zap.Logger, pfToCniConfigs map[string]string, sandbox *api.PodSandbox) ([]*NetworkStatus, error) { + podNetNs, err := n.getPodNetworkNamespace(sandbox) + if err != nil { + l.Error("Failed to get pod network namespace", zap.Error(err)) + return nil, fmt.Errorf("failed to get pod network namespace: %v", err) + } + + var netStatus []*NetworkStatus + idx := 1 + for pf, cniConfigName := range pfToCniConfigs { + podNicName := fmt.Sprintf("net%d", idx) + // Inject RDMA device to pod network namespace + deviceInfo, err := n.getRdmaDevicesFromRdmaNic(l, pf) + if err != nil { + l.Error("Failed to get RDMA device from rdma nic", + zap.String("pfName", pf), zap.Error(err)) + return nil, err + } + + var result *cni100.Result + result, err = n.setupPodNetwork(ctx, l, sandbox, cniConfigName, deviceInfo, podNicName, podNetNs) + if err != nil { + l.Error("Failed to setup pod network", zap.Error(err)) + return nil, err + } + + status := &NetworkStatus{ + Name: cniConfigName, + Device: pf, + DeviceInfo: &deviceInfo, + } + status.parseNetworkStatus(result) + netStatus = append(netStatus, status) + idx += 1 + } + + return netStatus, nil +} + +func (n *nriPlugin) updateResourceClaimNetworkData(ctx context.Context, l *zap.Logger, sandbox *api.PodSandbox, statuses []*NetworkStatus) { + if sandbox == nil || len(statuses) == 0 { + return + } + + claimRefs, ok := GetCache().GetPodClaimRefs(sandbox.Uid, sandbox.Namespace, sandbox.Name) + if !ok { + l.Debug("No pod->claim mapping found in cache, skip updating ResourceClaim NetworkData") + return + } + + for _, ref := range claimRefs { + for _, st := range statuses { + if st == nil { + continue + } + if st.Device == "" { + continue + } + if st.Interface == "" && len(st.IPs) == 0 && st.Mac == "" { + continue + } + + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + rc := &resourcev1.ResourceClaim{} + if err := n.client.Get(ctx, client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name}, 
rc); err != nil { + return err + } + + updated := false + for i := range rc.Status.Devices { + d := &rc.Status.Devices[i] + if d.Driver != constant.DRADriverName { + continue + } + if d.Device != st.Device { + continue + } + d.NetworkData = &resourcev1.NetworkDeviceData{ + InterfaceName: st.Interface, + IPs: st.IPs, + HardwareAddress: st.Mac, + } + updated = true + break + } + + if !updated { + return nil + } + return n.client.Status().Update(ctx, rc) + }) + if err != nil { + l.Debug("Failed to update ResourceClaim NetworkData", zap.String("resourceClaim", fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)), zap.Error(err)) + } + } + } +} + +// setupPodNetwork sets up the pod network +func (n *nriPlugin) setupPodNetwork(ctx context.Context, l *zap.Logger, pod *api.PodSandbox, cniConfigName string, deviceInfo DeviceInfo, nicName string, podNetNs string) (*cni100.Result, error) { + // Get NetworkAttachmentDefinition object + confList, err := n.loadCniConfig(ctx, l, cniConfigName, deviceInfo.PciAddress) + if err != nil { + return nil, fmt.Errorf("failed to load CNI config: %v", err) + } + + l.Debug("Got final CNI config, Start invoke CNI ADD", zap.String("confList", string(confList.Bytes))) + result, err := cniAdd(ctx, confList, buildRuntimeConfig(pod, deviceInfo, nicName, podNetNs)) + if err != nil { + return nil, fmt.Errorf("failed to add network: %v", err) + } + + res, err := cni100.NewResultFromResult(result) + if err != nil { + return nil, fmt.Errorf("failed to convert result: %v", err) + } + + return res, nil +} + +// getPodNetworkNamespace gets the network namespace of a Pod +func (n *nriPlugin) getPodNetworkNamespace(pod *api.PodSandbox) (string, error) { + // Get the network namespace path of the Pod + for _, namespace := range pod.Linux.GetNamespaces() { + if namespace.Type == "network" { + return namespace.Path, nil + } + } + + return "", fmt.Errorf("failed to get network namespace from pod %s", pod.Id) +} + +func (n *nriPlugin) getRdmaDevicesFromRdmaNic(l 
*zap.Logger, device string) (DeviceInfo, error) { + l.Debug("Get All RDMA devices from RDMA nic") + // Get all available VF from the device + vfDevices, err := networking.GetSriovAvailableVfPciAddressesForNetDev(device) + if err != nil { + return DeviceInfo{}, fmt.Errorf("failed to get VFs from device %s: %v", device, err) + } + + vfName, err := networking.GetNetNameFromPciAddress(vfDevices[0]) + if err != nil { + return DeviceInfo{}, fmt.Errorf("failed to get vf name from pci address: %v", err) + } + + l.Debug("Found Available VFs for device, Get All RDMA devices from vf", zap.String("vfName", vfName)) + deviceInfo := DeviceInfo{ + PciAddress: vfDevices[0], + } + + rdmaDevice, err := rdmamap.GetRdmaDeviceForNetdevice(vfName) + if err != nil { + return DeviceInfo{}, fmt.Errorf("failed to get rdma device for network device %s: %v", vfName, err) + } + + if rdmaDevice != "" { + // Add the RDMA device to the allocation record + charRdmaDevices := rdmamap.GetRdmaCharDevices(rdmaDevice) + deviceInfo.RdmaDevice = rdmaDevice + deviceInfo.RdmaCharDevices = charRdmaDevices + } + + // NOTE: rdma-cni do same thing like this + // Inject RDMA device to pod network namespace + // hostDev, err := netlink.RdmaLinkByName(rdmaDevice) + // if err != nil { + // return DeviceInfo{}, fmt.Errorf("failed to get rdma link for network device %s: %v", rdmaDevice, err) + // } + + // err = netlink.RdmaLinkSetNsFd(hostDev, uint32(podNetNs)) + // if err != nil { + // return DeviceInfo{}, fmt.Errorf("failed to set RDMA device for network device %s: %v", device, err) + // } + + l.Debug("Successfully get all RDMA devices from VF") + return deviceInfo, nil +} + +func (n *nriPlugin) getResourceSliceByNode(ctx context.Context) (*resourcev1.ResourceSlice, error) { + if rs, ok := GetCache().GetResourceSlice(n.nodeName, 30*time.Second); ok { + return rs, nil + } + + // Use field selectors to filter ResourceSlices by both nodeName and DRADriverName + // Create field selector for controller-runtime client + 
fieldSelector := client.MatchingFields(map[string]string{ + resourcev1.ResourceSliceSelectorNodeName: n.nodeName, + resourcev1.ResourceSliceSelectorDriver: constant.DRADriverName, + }) + + rsList := &resourcev1.ResourceSliceList{} + if err := n.client.List(ctx, rsList, fieldSelector); err != nil { + return nil, err + } + + // Expect only one ResourceSlice to be returned + if len(rsList.Items) == 0 { + return nil, fmt.Errorf("no ResourceSlice found for node %s and driver %s", n.nodeName, constant.DRADriverName) + } + + if len(rsList.Items) > 1 { + n.logger.Warn("Multiple ResourceSlices found when only one was expected", + zap.String("nodeName", n.nodeName), + zap.String("driver", constant.DRADriverName), + zap.Int("count", len(rsList.Items))) + } + + // Use the first ResourceSlice + rs := &rsList.Items[0] + GetCache().SetResourceSlice(n.nodeName, rs) + return rs, nil +} + +func (n *nriPlugin) loadCniConfig(ctx context.Context, l *zap.Logger, netName, deviceId string) (*libcni.NetworkConfigList, error) { + if cached, ok := GetCache().GetConfList(n.spiderpoolNamespace, netName, deviceId, 5*time.Minute); ok { + return cached, nil + } + + if cfg, ok := GetCache().GetNADConfig(n.spiderpoolNamespace, netName, 5*time.Minute); ok { + confList, err := buildSecondaryCniConfig(netName, cfg, deviceId) + if err != nil { + return nil, fmt.Errorf("failed to build CNI config from NetworkAttachmentDefinition %s/%s: %v", n.spiderpoolNamespace, netName, err) + } + GetCache().SetConfList(n.spiderpoolNamespace, netName, deviceId, confList) + return confList, nil + } + + nad := &netv1.NetworkAttachmentDefinition{} + if err := n.client.Get(ctx, client.ObjectKey{Namespace: n.spiderpoolNamespace, Name: netName}, nad); err != nil { + return nil, fmt.Errorf("failed to get NetworkAttachmentDefinition %s/%s: %v", n.spiderpoolNamespace, netName, err) + } + + if nad.Spec.Config == "" { + return nil, fmt.Errorf("NetworkAttachmentDefinition %s/%s has empty config", n.spiderpoolNamespace, nad.Name) 
+ } + + l.Debug("Got CNI config from NetworkAttachmentDefinition", zap.String("nadName", nad.Name), zap.String("config", nad.Spec.Config)) + GetCache().SetNADConfig(n.spiderpoolNamespace, netName, nad.Spec.Config) + confList, err := buildSecondaryCniConfig(nad.Name, nad.Spec.Config, deviceId) + if err != nil { + return nil, fmt.Errorf("failed to build CNI config from NetworkAttachmentDefinition %s/%s: %v", n.spiderpoolNamespace, nad.Name, err) + } + GetCache().SetConfList(n.spiderpoolNamespace, netName, deviceId, confList) + + return confList, nil +} + +func (n *nriPlugin) updatePodNetworkStatus(ctx context.Context, l *zap.Logger, netStatusJSON string, k8sPod *corev1.Pod) error { + // Initialize annotations map if it doesn't exist + if k8sPod.Annotations == nil { + k8sPod.Annotations = make(map[string]string) + } + + // Update local pod annotations with network status + k8sPod.Annotations[constant.AnnoDRAPodNetworkStatus] = string(netStatusJSON) + + // Update Pod in Kubernetes API with retry logic + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Get the latest version of the Pod before attempting an update + latestPod := &corev1.Pod{} + err := n.client.Get(ctx, client.ObjectKey{Namespace: k8sPod.Namespace, Name: k8sPod.Name}, latestPod) + if err != nil { + l.Error("Failed to get latest Pod version", zap.Error(err)) + return err + } + + // Apply our annotation update to the latest version + if latestPod.Annotations == nil { + latestPod.Annotations = make(map[string]string) + } + latestPod.Annotations[constant.AnnoDRAPodNetworkStatus] = k8sPod.Annotations[constant.AnnoDRAPodNetworkStatus] + + // Update the Pod with the latest version + return n.client.Update(ctx, latestPod) + }) + + if err != nil { + l.Error("Failed to update Pod annotations in Kubernetes after retries", zap.Error(err)) + return fmt.Errorf("failed to update Pod annotations in Kubernetes with retries: %v", err) + } + + return nil +} + +func (n *nriPlugin) 
parseRDMACharDevicesToMounts(status []*NetworkStatus) []*api.Mount { + if len(status) == 0 { + return []*api.Mount{} + } + + mounts := make([]*api.Mount, 0, len(status)*4) + + // Add each RDMA character device as a mount + for _, d := range status { + for _, charDevice := range d.DeviceInfo.RdmaCharDevices { + if charDevice == rdmamap.RdmaUcmDevice { + continue + } + + // Create a mount for the device + mount := &api.Mount{ + Source: charDevice, + Destination: charDevice, + Type: "bind", + Options: []string{"bind", "rw"}, + } + mounts = append(mounts, mount) + } + } + + // Add the RDMA CM device if it's not already included + mounts = append(mounts, &api.Mount{ + Source: rdmamap.RdmaUcmDevice, + Destination: rdmamap.RdmaUcmDevice, + Type: "bind", + Options: []string{"bind", "rw"}, + }) + + return mounts +} + +func (n *nriPlugin) parseLinuxDeviceNode(status []*NetworkStatus) ([]*api.LinuxDevice, []*api.LinuxDeviceCgroup, error) { + if len(status) == 0 { + return []*api.LinuxDevice{}, []*api.LinuxDeviceCgroup{}, nil + } + + deviceNodes := make([]*api.LinuxDevice, 0, len(status)*3) + linuxDeviceCgroup := make([]*api.LinuxDeviceCgroup, 0, len(status)*3) + for _, d := range status { + for _, c := range d.DeviceInfo.RdmaCharDevices { + deviceNode, err := DeviceFromPath(c) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse devices node for rdma char devices: %v", err) + } + deviceNodes = append(deviceNodes, deviceNode) + + linuxDeviceCgroup = append(linuxDeviceCgroup, &api.LinuxDeviceCgroup{ + Allow: true, + Type: deviceNode.Type, + Major: &api.OptionalInt64{Value: deviceNode.Major}, + Minor: &api.OptionalInt64{Value: deviceNode.Minor}, + Access: "rw", + }) + } + } + + return deviceNodes, linuxDeviceCgroup, nil +} + +// cacheNetworkStatusToFile saves the network status JSON to a local file +// func (n *nriPlugin) cacheNetworkStatusToFile(l *zap.Logger, namespace, podName, podUID, networkStatusJSON string) error { +// // Create the directory structure if it 
doesn't exist +// networkStatusDir := filepath.Join(defaultCniResultCacheDir, "network-status") +// if err := os.MkdirAll(networkStatusDir, 0755); err != nil { +// return fmt.Errorf("failed to create network status directory %s: %v", networkStatusDir, err) +// } + +// // Create a filename based on the pod's namespace and name +// // This ensures uniqueness and makes it easy to find the file for a specific pod +// fileName := fmt.Sprintf("%s_%s_%s_network_status.json", namespace, podName, podUID) +// filePath := filepath.Join(networkStatusDir, fileName) + +// // Write the network status JSON to the file +// if err := os.WriteFile(filePath, []byte(networkStatusJSON), 0644); err != nil { +// return fmt.Errorf("failed to write network status to file %s: %v", filePath, err) +// } + +// l.Debug("Successfully saved network status to local file", +// zap.String("namespace", namespace), +// zap.String("podName", podName), +// zap.String("podUID", podUID), +// zap.String("filePath", filePath)) + +// return nil +// } + +// getNetworkStatusFromCache reads network status from the local cache file +// func (n *nriPlugin) getNetworkStatusFromCache(namespace, podName, podUID string) ([]*NetworkStatus, error) { +// // Construct the expected file path +// networkStatusDir := filepath.Join(defaultCniResultCacheDir, "network-status") +// fileName := fmt.Sprintf("%s_%s_%s.json", namespace, podName, podUID) +// filePath := filepath.Join(networkStatusDir, fileName) + +// // Check if the file exists +// if _, err := os.Stat(filePath); os.IsNotExist(err) { +// return nil, fmt.Errorf("network status file not found: %s", filePath) +// } + +// // Read the file content +// data, err := os.ReadFile(filePath) +// if err != nil { +// return nil, fmt.Errorf("failed to read network status file %s: %v", filePath, err) +// } + +// // Parse the JSON data +// var netStatus []*NetworkStatus +// if err := json.Unmarshal(data, &netStatus); err != nil { +// return nil, fmt.Errorf("failed to unmarshal 
network status from file: %v", err) +// } + +// return netStatus, nil +// } + +func (n *nriPlugin) getNetworkStatusFromPodAnnotations(sandbox *api.PodSandbox, k8sPod *corev1.Pod) ([]*NetworkStatus, error) { + var netStatus []*NetworkStatus + // Check if network status annotation exists + if k8sPod.Annotations == nil || k8sPod.Annotations[constant.AnnoDRAPodNetworkStatus] == "" { + return nil, nil + } + + // Parse network status from annotation + if err := json.Unmarshal([]byte(k8sPod.Annotations[constant.AnnoDRAPodNetworkStatus]), &netStatus); err != nil { + n.logger.Error("Failed to unmarshal network status from Pod annotation", zap.Error(err)) + return nil, err + } + + return netStatus, nil +} + +// deleteNetworkStatusCache deletes the network status cache file +// If the file doesn't exist, it returns nil (no error) +// func (n *nriPlugin) deleteNetworkStatusCache(namespace, podName, podUID string) error { +// // Construct the expected file path +// networkStatusDir := filepath.Join(defaultCniResultCacheDir, "network-status") +// fileName := fmt.Sprintf("%s_%s_%s.json", namespace, podName, podUID) +// filePath := filepath.Join(networkStatusDir, fileName) + +// // Check if the file exists +// if _, err := os.Stat(filePath); os.IsNotExist(err) { +// // File doesn't exist, nothing to delete +// return nil +// } + +// // Delete the file +// if err := os.Remove(filePath); err != nil { +// return fmt.Errorf("failed to delete network status file %s: %v", filePath, err) +// } + +// return nil +// } diff --git a/pkg/dra/nri/nri_suite_test.go b/pkg/dra/nri/nri_suite_test.go new file mode 100644 index 0000000000..a6c73afc20 --- /dev/null +++ b/pkg/dra/nri/nri_suite_test.go @@ -0,0 +1,15 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestNri(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "NRI Suite") +} diff --git a/pkg/dra/nri/oci.go b/pkg/dra/nri/oci.go new file mode 100644 index 0000000000..96f6b03123 --- /dev/null +++ b/pkg/dra/nri/oci.go @@ -0,0 +1,79 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// this file is inspired by containerd: +// github.com/containerd/containerd/v2/pkg/oci/utils_unix.go +package nri + +import ( + "errors" + + "github.com/containerd/nri/pkg/api" + "golang.org/x/sys/unix" +) + +const ( + wildcardDevice = "a" //nolint:nolintlint,unused,varcheck // currently unused, but should be included when upstreaming to OCI runtime-spec. + blockDevice = "b" + charDevice = "c" // or "u" + fifoDevice = "p" +) + +// ErrNotADevice denotes that a file is not a valid linux device. +// When checking this error, use errors.Is(err, oci.ErrNotADevice) +var ErrNotADevice = errors.New("not a device node") + +func DeviceFromPath(path string) (*api.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) //nolint:nolintlint,unconvert // the type is 32bit on mips. 
+ major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + + var ( + devType string + mode = stat.Mode + ) + + switch mode & unix.S_IFMT { + case unix.S_IFBLK: + devType = blockDevice + case unix.S_IFCHR: + devType = charDevice + case unix.S_IFIFO: + devType = fifoDevice + default: + return nil, ErrNotADevice + } + fm := api.OptionalFileMode{Value: mode &^ unix.S_IFMT} + return &api.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + Uid: &api.OptionalUInt32{Value: stat.Uid}, + Gid: &api.OptionalUInt32{Value: stat.Gid}, + }, nil +} diff --git a/pkg/dra/nri/utils.go b/pkg/dra/nri/utils.go new file mode 100644 index 0000000000..023cc21c8f --- /dev/null +++ b/pkg/dra/nri/utils.go @@ -0,0 +1,135 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package nri + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + resourcev1 "k8s.io/api/resource/v1" + podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" +) + +const ( + defaultKubeletSocket = "kubelet.sock" // which is defined in k8s.io/kubernetes/pkg/kubelet/apis/podresources + kubeletConnectionTimeout = 10 * time.Second + defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb + defaultPodResourcesPath = "/var/lib/kubelet/pod-resources" + unixProtocol = "unix" +) + +// GetResourceClient returns an instance of ResourceClient interface initialized with Pod resource information +func GetKubeletResourceClient() (podresourcesapi.PodResourcesListerClient, *grpc.ClientConn, error) { + kubeletSocketPath := filepath.Join(defaultPodResourcesPath, defaultKubeletSocket) + if !hasKubeletAPIEndpoint(kubeletSocketPath) { + return nil, nil, fmt.Errorf("GetResourceClient: no Kubelet resource API endpoint found") + } + + return getKubeletResourceClient(localEndpoint(kubeletSocketPath)) +} + +// LocalEndpoint returns the full path to a unix 
 socket at the given endpoint
+// which is in k8s.io/kubernetes/pkg/kubelet/util
+func localEndpoint(path string) string {
+	return unixProtocol + ":" + path
+}
+
+func getKubeletResourceClient(kubeletSocketURL string) (podresourcesapi.PodResourcesListerClient, *grpc.ClientConn, error) {
+	conn, err := grpc.NewClient(kubeletSocketURL,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaultPodResourcesMaxSize)))
+	if err != nil {
+		return nil, nil, fmt.Errorf("error dialing socket %s: %v", kubeletSocketURL, err)
+	}
+	return podresourcesapi.NewPodResourcesListerClient(conn), conn, nil
+}
+
+func hasKubeletAPIEndpoint(path string) bool {
+	// Check for kubelet resource API socket file
+	if _, err := os.Stat(path); err != nil {
+		return false
+	}
+	return true
+}
+
+// GetAllNvidiaGpusMap returns a map keyed by GPU UUID whose value is the
+// per-GPU directory name under NvidiaDriverGPUPath (the index/PCI address).
+func GetAllNvidiaGpusMap() (map[string]string, error) {
+	nvidiaGpuDirs, err := os.ReadDir(NvidiaDriverGPUPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// gpuMap maps GPU UUID -> directory name (index/PCI address).
+	// NOTE(review): the previous doc claimed the reverse direction
+	// (index -> UUID); the code below stores UUID as the key — confirm
+	// which direction callers expect.
+	gpuMap := make(map[string]string, len(nvidiaGpuDirs))
+	for _, d := range nvidiaGpuDirs {
+		if !d.IsDir() {
+			continue
+		}
+
+		gpuInfoPath := filepath.Join(NvidiaDriverGPUPath, d.Name(), "information")
+
+		// Read the information file
+		content, err := os.ReadFile(gpuInfoPath)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read GPU information file %s: %v", gpuInfoPath, err)
+		}
+
+		// Parse the content to extract UUID
+		lines := strings.Split(string(content), "\n")
+		for _, line := range lines {
+			if strings.Contains(line, "GPU UUID") {
+				parts := strings.Split(line, ":")
+				if len(parts) >= 2 {
+					uuid := strings.TrimSpace(parts[1])
+					// Store the UUID -> directory-name mapping
+					gpuMap[uuid] = d.Name()
+				}
+				break
+			}
+		}
+	}
+
+	return gpuMap, nil
+}
+
+func IsReadyRdmaResourceDevice(dev resourcev1.Device) bool {
+	if GetStringValueForAttributes("state", dev.Attributes) != "up" {
+ return false + } + + if !GetBoolValueForAttributes("rdma", dev.Attributes) { + return false + } + + return true +} + +// GetStringValueForAttributes returns the string value of the attribute +func GetStringValueForAttributes(key resourcev1.QualifiedName, attributes map[resourcev1.QualifiedName]resourcev1.DeviceAttribute) string { + if attributes[key].StringValue != nil { + return *attributes[key].StringValue + } + return "" +} + +// GetBoolValueForAttributes returns the bool value of the attribute +func GetBoolValueForAttributes(key resourcev1.QualifiedName, attributes map[resourcev1.QualifiedName]resourcev1.DeviceAttribute) bool { + if attributes[key].BoolValue != nil { + return *attributes[key].BoolValue + } + return false +} + +// GetIntValueForAttributes returns the int value of the attribute +func GetIntValueForAttributes(key resourcev1.QualifiedName, attributes map[resourcev1.QualifiedName]resourcev1.DeviceAttribute) int64 { + if attributes[key].IntValue != nil { + return *attributes[key].IntValue + } + return 0 +} diff --git a/pkg/dra/utils.go b/pkg/dra/utils.go new file mode 100644 index 0000000000..ab0304ee4c --- /dev/null +++ b/pkg/dra/utils.go @@ -0,0 +1,61 @@ +package dra + +import ( + "net/url" + "regexp" + "strings" + "time" + "unicode" +) + +const ( + defaultKubeletSocket = "kubelet" // which is defined in k8s.io/kubernetes/pkg/kubelet/apis/podresources + kubeletConnectionTimeout = 10 * time.Second + defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb + defaultPodResourcesPath = "/var/lib/kubelet/pod-resources" + unixProtocol = "unix" +) + +// GetPciAddressPrefix returns the prefix of a PCI address +// [domain]:[bus]:[device].[function] -> [domain]:[bus] +// e.g. 
0000:af:00.1 -> 0000:af +func GetPciAddressPrefix(pciAddress string) string { + parts := strings.Split(pciAddress, ":") + if len(parts) == 3 { + return parts[0] + ":" + parts[1] + } + return "" +} + +// LocalEndpoint returns the full path to a unix socket at the given endpoint +// which is in k8s.io/kubernetes/pkg/kubelet/util +func localEndpoint(path string) *url.URL { + return &url.URL{ + Scheme: unixProtocol, + Path: path + ".sock", + } +} + +// NormalizedDNS1123Label normalizes the interface name to a valid DNS1123 label +func NormalizedDNS1123Label(iface string) string { + // Convert to lowercase + normalized := strings.ToLower(iface) + // Replace invalid chars with hyphen + reg := regexp.MustCompile("[^a-z0-9-]") + normalized = reg.ReplaceAllString(normalized, "-") + // Remove leading and trailing hyphens + normalized = strings.Trim(normalized, "-") + // Replace multiple consecutive hyphens with a single one + reg = regexp.MustCompile("-+") + normalized = reg.ReplaceAllString(normalized, "-") + + // If the string is empty after normalization, use a default name + if normalized == "" { + normalized = "iface" + } + // If it starts with a number, prefix it + if unicode.IsDigit(rune(normalized[0])) { + normalized = "iface-" + normalized + } + return normalized +} diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go index f036c4ec8f..697a80e487 100644 --- a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go @@ -2,13 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +kubebuilder:rbac:groups=spiderpool.spidernet.io,resources=spiderippools,verbs=get;list;watch;create;update;patch;delete;deletecollection -// +kubebuilder:rbac:groups=spiderpool.spidernet.io,resources=spidersubnets;spiderendpoints;spiderreservedips;spidermultusconfigs;spiderclaimparameters,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=spiderpool.spidernet.io,resources=spidersubnets;spiderendpoints;spiderreservedips;spidermultusconfigs;spidercniconfigs;spiderclaimparameters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=spiderpool.spidernet.io,resources=spidercoordinators,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=spiderpool.spidernet.io,resources=spidersubnets/status;spiderippools/status;spidercoordinators/status,verbs=get;update;patch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="coordination.k8s.io",resources=leases,verbs=create;get;update // +kubebuilder:rbac:groups="apps",resources=statefulsets;deployments;replicasets;daemonsets,verbs=get;list;watch;update -// +kubebuilder:rbac:groups="resource.k8s.io",resources=resourceclaims;resourceclaims/status;podschedulingcontexts/status;resourceclaimtemplates;resourceclasses;podschedulingcontexts,verbs=get;list;patch;watch;update +// +kubebuilder:rbac:groups="resource.k8s.io",resources=resourceclaims;deviceclasses;resourceclaimtemplates,verbs=get;list;watch +// +kubebuilder:rbac:groups="resource.k8s.io",resources=resourceslices,verbs=create;get;list;patch;watch;update;delete // +kubebuilder:rbac:groups="networking.k8s.io",resources=servicecidrs,verbs=get;list;watch // +kubebuilder:rbac:groups="batch",resources=jobs;cronjobs,verbs=get;list;watch;update;delete // +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercniconfig_types.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercniconfig_types.go new file mode 100644 index 0000000000..182e45d46e --- /dev/null +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercniconfig_types.go @@ -0,0 +1,33 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package v2beta1 + +import ( + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:resource:categories={spiderpool},path="spidercniconfigs",scope="Cluster",shortName={scc},singular="spidercniconfig" +// +kubebuilder:object:root=true + +// +genclient +// +genclient:noStatus +type SpiderCNIConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec is the specification of the CNI configuration + Spec MultusCNIConfigSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +type SpiderCNIConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []SpiderCNIConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&SpiderCNIConfig{}, &SpiderCNIConfigList{}) +} diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go index 08d80be4dc..123718441e 100644 --- a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright 2022 Authors of spidernet-io // SPDX-License-Identifier: Apache-2.0 @@ -10,7 +9,7 @@ package v2beta1 import ( "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -512,6 +511,64 @@ func (in *Route) DeepCopy() *Route { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpiderCNIConfig) DeepCopyInto(out *SpiderCNIConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpiderCNIConfig. +func (in *SpiderCNIConfig) DeepCopy() *SpiderCNIConfig { + if in == nil { + return nil + } + out := new(SpiderCNIConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpiderCNIConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpiderCNIConfigList) DeepCopyInto(out *SpiderCNIConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpiderCNIConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpiderCNIConfigList. +func (in *SpiderCNIConfigList) DeepCopy() *SpiderCNIConfigList { + if in == nil { + return nil + } + out := new(SpiderCNIConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpiderCNIConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SpiderCoordinator) DeepCopyInto(out *SpiderCoordinator) { *out = *in diff --git a/pkg/k8s/client/clientset/versioned/clientset.go b/pkg/k8s/client/clientset/versioned/clientset.go index 06a71c391f..a338d30fee 100644 --- a/pkg/k8s/client/clientset/versioned/clientset.go +++ b/pkg/k8s/client/clientset/versioned/clientset.go @@ -6,8 +6,8 @@ package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" discovery "k8s.io/client-go/discovery" diff --git a/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go b/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go index d04ca1d2ce..012746cd7c 100644 --- a/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go @@ -9,6 +9,7 @@ import ( clientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" fakespiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -18,8 +19,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. 
+// +// Deprecated: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -32,9 +37,13 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + var opts metav1.ListOptions + if watchAction, ok := action.(testing.WatchActionImpl); ok { + opts = watchAction.ListOptions + } gvr := action.GetResource() ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) + watch, err := o.Watch(gvr, ns, opts) if err != nil { return false, nil, err } @@ -61,6 +70,17 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// IsWatchListSemanticsSupported informs the reflector that this client +// doesn't support WatchList semantics. +// +// This is a synthetic method whose sole purpose is to satisfy the optional +// interface check performed by the reflector. +// Returning true signals that WatchList can NOT be used. +// No additional logic is implemented here. 
+func (c *Clientset) IsWatchListSemanticsUnSupported() bool { + return true +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercniconfig.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercniconfig.go new file mode 100644 index 0000000000..31d3563685 --- /dev/null +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercniconfig.go @@ -0,0 +1,39 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" + gentype "k8s.io/client-go/gentype" +) + +// fakeSpiderCNIConfigs implements SpiderCNIConfigInterface +type fakeSpiderCNIConfigs struct { + *gentype.FakeClientWithList[*v2beta1.SpiderCNIConfig, *v2beta1.SpiderCNIConfigList] + Fake *FakeSpiderpoolV2beta1 +} + +func newFakeSpiderCNIConfigs(fake *FakeSpiderpoolV2beta1, namespace string) spiderpoolspidernetiov2beta1.SpiderCNIConfigInterface { + return &fakeSpiderCNIConfigs{ + gentype.NewFakeClientWithList[*v2beta1.SpiderCNIConfig, *v2beta1.SpiderCNIConfigList]( + fake.Fake, + namespace, + v2beta1.SchemeGroupVersion.WithResource("spidercniconfigs"), + v2beta1.SchemeGroupVersion.WithKind("SpiderCNIConfig"), + func() *v2beta1.SpiderCNIConfig { return &v2beta1.SpiderCNIConfig{} }, + func() *v2beta1.SpiderCNIConfigList { return &v2beta1.SpiderCNIConfigList{} }, + func(dst, src *v2beta1.SpiderCNIConfigList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.SpiderCNIConfigList) []*v2beta1.SpiderCNIConfig { + return gentype.ToPointerSlice(list.Items) + }, + func(list 
*v2beta1.SpiderCNIConfigList, items []*v2beta1.SpiderCNIConfig) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercoordinator.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercoordinator.go index 247025419d..984679c34c 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercoordinator.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidercoordinator.go @@ -6,114 +6,34 @@ package fake import ( - "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" + gentype "k8s.io/client-go/gentype" ) -// FakeSpiderCoordinators implements SpiderCoordinatorInterface -type FakeSpiderCoordinators struct { +// fakeSpiderCoordinators implements SpiderCoordinatorInterface +type fakeSpiderCoordinators struct { + *gentype.FakeClientWithList[*v2beta1.SpiderCoordinator, *v2beta1.SpiderCoordinatorList] Fake *FakeSpiderpoolV2beta1 } -var spidercoordinatorsResource = v2beta1.SchemeGroupVersion.WithResource("spidercoordinators") - -var spidercoordinatorsKind = v2beta1.SchemeGroupVersion.WithKind("SpiderCoordinator") - -// Get takes name of the spiderCoordinator, and returns the corresponding spiderCoordinator object, and an error if there is any. -func (c *FakeSpiderCoordinators) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderCoordinator, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(spidercoordinatorsResource, name), &v2beta1.SpiderCoordinator{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderCoordinator), err -} - -// List takes label and field selectors, and returns the list of SpiderCoordinators that match those selectors. -func (c *FakeSpiderCoordinators) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderCoordinatorList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(spidercoordinatorsResource, spidercoordinatorsKind, opts), &v2beta1.SpiderCoordinatorList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.SpiderCoordinatorList{ListMeta: obj.(*v2beta1.SpiderCoordinatorList).ListMeta} - for _, item := range obj.(*v2beta1.SpiderCoordinatorList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested spiderCoordinators. -func (c *FakeSpiderCoordinators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(spidercoordinatorsResource, opts)) -} - -// Create takes the representation of a spiderCoordinator and creates it. Returns the server's representation of the spiderCoordinator, and an error, if there is any. -func (c *FakeSpiderCoordinators) Create(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.CreateOptions) (result *v2beta1.SpiderCoordinator, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(spidercoordinatorsResource, spiderCoordinator), &v2beta1.SpiderCoordinator{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderCoordinator), err -} - -// Update takes the representation of a spiderCoordinator and updates it. 
Returns the server's representation of the spiderCoordinator, and an error, if there is any. -func (c *FakeSpiderCoordinators) Update(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (result *v2beta1.SpiderCoordinator, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(spidercoordinatorsResource, spiderCoordinator), &v2beta1.SpiderCoordinator{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderCoordinator), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeSpiderCoordinators) UpdateStatus(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (*v2beta1.SpiderCoordinator, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(spidercoordinatorsResource, "status", spiderCoordinator), &v2beta1.SpiderCoordinator{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderCoordinator), err -} - -// Delete takes name of the spiderCoordinator and deletes it. Returns an error if one occurs. -func (c *FakeSpiderCoordinators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(spidercoordinatorsResource, name, opts), &v2beta1.SpiderCoordinator{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeSpiderCoordinators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(spidercoordinatorsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.SpiderCoordinatorList{}) - return err -} - -// Patch applies the patch and returns the patched spiderCoordinator. 
-func (c *FakeSpiderCoordinators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderCoordinator, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(spidercoordinatorsResource, name, pt, data, subresources...), &v2beta1.SpiderCoordinator{}) - if obj == nil { - return nil, err +func newFakeSpiderCoordinators(fake *FakeSpiderpoolV2beta1) spiderpoolspidernetiov2beta1.SpiderCoordinatorInterface { + return &fakeSpiderCoordinators{ + gentype.NewFakeClientWithList[*v2beta1.SpiderCoordinator, *v2beta1.SpiderCoordinatorList]( + fake.Fake, + "", + v2beta1.SchemeGroupVersion.WithResource("spidercoordinators"), + v2beta1.SchemeGroupVersion.WithKind("SpiderCoordinator"), + func() *v2beta1.SpiderCoordinator { return &v2beta1.SpiderCoordinator{} }, + func() *v2beta1.SpiderCoordinatorList { return &v2beta1.SpiderCoordinatorList{} }, + func(dst, src *v2beta1.SpiderCoordinatorList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.SpiderCoordinatorList) []*v2beta1.SpiderCoordinator { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta1.SpiderCoordinatorList, items []*v2beta1.SpiderCoordinator) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta1.SpiderCoordinator), err } diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderippool.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderippool.go index 276c537b27..f9bc06cb78 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderippool.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderippool.go @@ -6,114 +6,34 @@ package fake import ( - "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" + gentype "k8s.io/client-go/gentype" ) -// FakeSpiderIPPools implements SpiderIPPoolInterface -type FakeSpiderIPPools struct { +// fakeSpiderIPPools implements SpiderIPPoolInterface +type fakeSpiderIPPools struct { + *gentype.FakeClientWithList[*v2beta1.SpiderIPPool, *v2beta1.SpiderIPPoolList] Fake *FakeSpiderpoolV2beta1 } -var spiderippoolsResource = v2beta1.SchemeGroupVersion.WithResource("spiderippools") - -var spiderippoolsKind = v2beta1.SchemeGroupVersion.WithKind("SpiderIPPool") - -// Get takes name of the spiderIPPool, and returns the corresponding spiderIPPool object, and an error if there is any. -func (c *FakeSpiderIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderIPPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(spiderippoolsResource, name), &v2beta1.SpiderIPPool{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderIPPool), err -} - -// List takes label and field selectors, and returns the list of SpiderIPPools that match those selectors. -func (c *FakeSpiderIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderIPPoolList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(spiderippoolsResource, spiderippoolsKind, opts), &v2beta1.SpiderIPPoolList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.SpiderIPPoolList{ListMeta: obj.(*v2beta1.SpiderIPPoolList).ListMeta} - for _, item := range obj.(*v2beta1.SpiderIPPoolList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested spiderIPPools. -func (c *FakeSpiderIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(spiderippoolsResource, opts)) -} - -// Create takes the representation of a spiderIPPool and creates it. Returns the server's representation of the spiderIPPool, and an error, if there is any. -func (c *FakeSpiderIPPools) Create(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.CreateOptions) (result *v2beta1.SpiderIPPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(spiderippoolsResource, spiderIPPool), &v2beta1.SpiderIPPool{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderIPPool), err -} - -// Update takes the representation of a spiderIPPool and updates it. Returns the server's representation of the spiderIPPool, and an error, if there is any. -func (c *FakeSpiderIPPools) Update(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (result *v2beta1.SpiderIPPool, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(spiderippoolsResource, spiderIPPool), &v2beta1.SpiderIPPool{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderIPPool), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeSpiderIPPools) UpdateStatus(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (*v2beta1.SpiderIPPool, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(spiderippoolsResource, "status", spiderIPPool), &v2beta1.SpiderIPPool{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderIPPool), err -} - -// Delete takes name of the spiderIPPool and deletes it. Returns an error if one occurs. -func (c *FakeSpiderIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(spiderippoolsResource, name, opts), &v2beta1.SpiderIPPool{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeSpiderIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(spiderippoolsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.SpiderIPPoolList{}) - return err -} - -// Patch applies the patch and returns the patched spiderIPPool. -func (c *FakeSpiderIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderIPPool, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(spiderippoolsResource, name, pt, data, subresources...), &v2beta1.SpiderIPPool{}) - if obj == nil { - return nil, err +func newFakeSpiderIPPools(fake *FakeSpiderpoolV2beta1) spiderpoolspidernetiov2beta1.SpiderIPPoolInterface { + return &fakeSpiderIPPools{ + gentype.NewFakeClientWithList[*v2beta1.SpiderIPPool, *v2beta1.SpiderIPPoolList]( + fake.Fake, + "", + v2beta1.SchemeGroupVersion.WithResource("spiderippools"), + v2beta1.SchemeGroupVersion.WithKind("SpiderIPPool"), + func() *v2beta1.SpiderIPPool { return &v2beta1.SpiderIPPool{} }, + func() *v2beta1.SpiderIPPoolList { return &v2beta1.SpiderIPPoolList{} }, + func(dst, src *v2beta1.SpiderIPPoolList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.SpiderIPPoolList) []*v2beta1.SpiderIPPool { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta1.SpiderIPPoolList, items []*v2beta1.SpiderIPPool) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta1.SpiderIPPool), err } diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidermultusconfig.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidermultusconfig.go index 9f1acdef04..ca2feddc69 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidermultusconfig.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidermultusconfig.go @@ -6,111 +6,34 @@ package fake import ( - "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + spiderpoolspidernetiov2beta1 
"github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" + gentype "k8s.io/client-go/gentype" ) -// FakeSpiderMultusConfigs implements SpiderMultusConfigInterface -type FakeSpiderMultusConfigs struct { +// fakeSpiderMultusConfigs implements SpiderMultusConfigInterface +type fakeSpiderMultusConfigs struct { + *gentype.FakeClientWithList[*v2beta1.SpiderMultusConfig, *v2beta1.SpiderMultusConfigList] Fake *FakeSpiderpoolV2beta1 - ns string -} - -var spidermultusconfigsResource = v2beta1.SchemeGroupVersion.WithResource("spidermultusconfigs") - -var spidermultusconfigsKind = v2beta1.SchemeGroupVersion.WithKind("SpiderMultusConfig") - -// Get takes name of the spiderMultusConfig, and returns the corresponding spiderMultusConfig object, and an error if there is any. -func (c *FakeSpiderMultusConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderMultusConfig, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(spidermultusconfigsResource, c.ns, name), &v2beta1.SpiderMultusConfig{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderMultusConfig), err -} - -// List takes label and field selectors, and returns the list of SpiderMultusConfigs that match those selectors. -func (c *FakeSpiderMultusConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderMultusConfigList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(spidermultusconfigsResource, spidermultusconfigsKind, c.ns, opts), &v2beta1.SpiderMultusConfigList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.SpiderMultusConfigList{ListMeta: obj.(*v2beta1.SpiderMultusConfigList).ListMeta} - for _, item := range obj.(*v2beta1.SpiderMultusConfigList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested spiderMultusConfigs. -func (c *FakeSpiderMultusConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(spidermultusconfigsResource, c.ns, opts)) - -} - -// Create takes the representation of a spiderMultusConfig and creates it. Returns the server's representation of the spiderMultusConfig, and an error, if there is any. -func (c *FakeSpiderMultusConfigs) Create(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.CreateOptions) (result *v2beta1.SpiderMultusConfig, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(spidermultusconfigsResource, c.ns, spiderMultusConfig), &v2beta1.SpiderMultusConfig{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderMultusConfig), err -} - -// Update takes the representation of a spiderMultusConfig and updates it. Returns the server's representation of the spiderMultusConfig, and an error, if there is any. -func (c *FakeSpiderMultusConfigs) Update(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.UpdateOptions) (result *v2beta1.SpiderMultusConfig, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(spidermultusconfigsResource, c.ns, spiderMultusConfig), &v2beta1.SpiderMultusConfig{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderMultusConfig), err -} - -// Delete takes name of the spiderMultusConfig and deletes it. Returns an error if one occurs. -func (c *FakeSpiderMultusConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(spidermultusconfigsResource, c.ns, name, opts), &v2beta1.SpiderMultusConfig{}) - - return err } -// DeleteCollection deletes a collection of objects. -func (c *FakeSpiderMultusConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(spidermultusconfigsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.SpiderMultusConfigList{}) - return err -} - -// Patch applies the patch and returns the patched spiderMultusConfig. -func (c *FakeSpiderMultusConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderMultusConfig, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(spidermultusconfigsResource, c.ns, name, pt, data, subresources...), &v2beta1.SpiderMultusConfig{}) - - if obj == nil { - return nil, err +func newFakeSpiderMultusConfigs(fake *FakeSpiderpoolV2beta1, namespace string) spiderpoolspidernetiov2beta1.SpiderMultusConfigInterface { + return &fakeSpiderMultusConfigs{ + gentype.NewFakeClientWithList[*v2beta1.SpiderMultusConfig, *v2beta1.SpiderMultusConfigList]( + fake.Fake, + namespace, + v2beta1.SchemeGroupVersion.WithResource("spidermultusconfigs"), + v2beta1.SchemeGroupVersion.WithKind("SpiderMultusConfig"), + func() *v2beta1.SpiderMultusConfig { return &v2beta1.SpiderMultusConfig{} }, + func() *v2beta1.SpiderMultusConfigList { return &v2beta1.SpiderMultusConfigList{} }, + func(dst, src *v2beta1.SpiderMultusConfigList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.SpiderMultusConfigList) []*v2beta1.SpiderMultusConfig { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta1.SpiderMultusConfigList, items []*v2beta1.SpiderMultusConfig) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta1.SpiderMultusConfig), err } diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderpool.spidernet.io_client.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderpool.spidernet.io_client.go index c4fff8f10b..1338157281 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderpool.spidernet.io_client.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spiderpool.spidernet.io_client.go @@ -15,20 +15,24 @@ type FakeSpiderpoolV2beta1 struct { *testing.Fake } +func (c *FakeSpiderpoolV2beta1) SpiderCNIConfigs(namespace string) v2beta1.SpiderCNIConfigInterface { + return newFakeSpiderCNIConfigs(c, namespace) +} + func (c *FakeSpiderpoolV2beta1) SpiderCoordinators() 
v2beta1.SpiderCoordinatorInterface { - return &FakeSpiderCoordinators{c} + return newFakeSpiderCoordinators(c) } func (c *FakeSpiderpoolV2beta1) SpiderIPPools() v2beta1.SpiderIPPoolInterface { - return &FakeSpiderIPPools{c} + return newFakeSpiderIPPools(c) } func (c *FakeSpiderpoolV2beta1) SpiderMultusConfigs(namespace string) v2beta1.SpiderMultusConfigInterface { - return &FakeSpiderMultusConfigs{c, namespace} + return newFakeSpiderMultusConfigs(c, namespace) } func (c *FakeSpiderpoolV2beta1) SpiderSubnets() v2beta1.SpiderSubnetInterface { - return &FakeSpiderSubnets{c} + return newFakeSpiderSubnets(c) } // RESTClient returns a RESTClient that is used to communicate diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidersubnet.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidersubnet.go index 04e05e62b7..11d5e4ab44 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidersubnet.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/fake/fake_spidersubnet.go @@ -6,114 +6,34 @@ package fake import ( - "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1" + gentype "k8s.io/client-go/gentype" ) -// FakeSpiderSubnets implements SpiderSubnetInterface -type FakeSpiderSubnets struct { +// fakeSpiderSubnets implements SpiderSubnetInterface +type fakeSpiderSubnets struct { + *gentype.FakeClientWithList[*v2beta1.SpiderSubnet, *v2beta1.SpiderSubnetList] Fake *FakeSpiderpoolV2beta1 } -var spidersubnetsResource = 
v2beta1.SchemeGroupVersion.WithResource("spidersubnets") - -var spidersubnetsKind = v2beta1.SchemeGroupVersion.WithKind("SpiderSubnet") - -// Get takes name of the spiderSubnet, and returns the corresponding spiderSubnet object, and an error if there is any. -func (c *FakeSpiderSubnets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderSubnet, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(spidersubnetsResource, name), &v2beta1.SpiderSubnet{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderSubnet), err -} - -// List takes label and field selectors, and returns the list of SpiderSubnets that match those selectors. -func (c *FakeSpiderSubnets) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderSubnetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(spidersubnetsResource, spidersubnetsKind, opts), &v2beta1.SpiderSubnetList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.SpiderSubnetList{ListMeta: obj.(*v2beta1.SpiderSubnetList).ListMeta} - for _, item := range obj.(*v2beta1.SpiderSubnetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested spiderSubnets. -func (c *FakeSpiderSubnets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(spidersubnetsResource, opts)) -} - -// Create takes the representation of a spiderSubnet and creates it. Returns the server's representation of the spiderSubnet, and an error, if there is any. -func (c *FakeSpiderSubnets) Create(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.CreateOptions) (result *v2beta1.SpiderSubnet, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(spidersubnetsResource, spiderSubnet), &v2beta1.SpiderSubnet{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderSubnet), err -} - -// Update takes the representation of a spiderSubnet and updates it. Returns the server's representation of the spiderSubnet, and an error, if there is any. -func (c *FakeSpiderSubnets) Update(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (result *v2beta1.SpiderSubnet, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(spidersubnetsResource, spiderSubnet), &v2beta1.SpiderSubnet{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderSubnet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeSpiderSubnets) UpdateStatus(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (*v2beta1.SpiderSubnet, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(spidersubnetsResource, "status", spiderSubnet), &v2beta1.SpiderSubnet{}) - if obj == nil { - return nil, err - } - return obj.(*v2beta1.SpiderSubnet), err -} - -// Delete takes name of the spiderSubnet and deletes it. Returns an error if one occurs. -func (c *FakeSpiderSubnets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(spidersubnetsResource, name, opts), &v2beta1.SpiderSubnet{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeSpiderSubnets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(spidersubnetsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.SpiderSubnetList{}) - return err -} - -// Patch applies the patch and returns the patched spiderSubnet. -func (c *FakeSpiderSubnets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderSubnet, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(spidersubnetsResource, name, pt, data, subresources...), &v2beta1.SpiderSubnet{}) - if obj == nil { - return nil, err +func newFakeSpiderSubnets(fake *FakeSpiderpoolV2beta1) spiderpoolspidernetiov2beta1.SpiderSubnetInterface { + return &fakeSpiderSubnets{ + gentype.NewFakeClientWithList[*v2beta1.SpiderSubnet, *v2beta1.SpiderSubnetList]( + fake.Fake, + "", + v2beta1.SchemeGroupVersion.WithResource("spidersubnets"), + v2beta1.SchemeGroupVersion.WithKind("SpiderSubnet"), + func() *v2beta1.SpiderSubnet { return &v2beta1.SpiderSubnet{} }, + func() *v2beta1.SpiderSubnetList { return &v2beta1.SpiderSubnetList{} }, + func(dst, src *v2beta1.SpiderSubnetList) { dst.ListMeta = src.ListMeta }, + func(list *v2beta1.SpiderSubnetList) []*v2beta1.SpiderSubnet { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v2beta1.SpiderSubnetList, items []*v2beta1.SpiderSubnet) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v2beta1.SpiderSubnet), err } diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/generated_expansion.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/generated_expansion.go index 39ae92a22c..5a21d250f0 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/generated_expansion.go +++ 
b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/generated_expansion.go @@ -5,6 +5,8 @@ package v2beta1 +type SpiderCNIConfigExpansion interface{} + type SpiderCoordinatorExpansion interface{} type SpiderIPPoolExpansion interface{} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercniconfig.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercniconfig.go new file mode 100644 index 0000000000..6f9d4b54b7 --- /dev/null +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercniconfig.go @@ -0,0 +1,59 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 + +import ( + context "context" + + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// SpiderCNIConfigsGetter has a method to return a SpiderCNIConfigInterface. +// A group's client should implement this interface. +type SpiderCNIConfigsGetter interface { + SpiderCNIConfigs(namespace string) SpiderCNIConfigInterface +} + +// SpiderCNIConfigInterface has methods to work with SpiderCNIConfig resources. 
+type SpiderCNIConfigInterface interface { + Create(ctx context.Context, spiderCNIConfig *spiderpoolspidernetiov2beta1.SpiderCNIConfig, opts v1.CreateOptions) (*spiderpoolspidernetiov2beta1.SpiderCNIConfig, error) + Update(ctx context.Context, spiderCNIConfig *spiderpoolspidernetiov2beta1.SpiderCNIConfig, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderCNIConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*spiderpoolspidernetiov2beta1.SpiderCNIConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*spiderpoolspidernetiov2beta1.SpiderCNIConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *spiderpoolspidernetiov2beta1.SpiderCNIConfig, err error) + SpiderCNIConfigExpansion +} + +// spiderCNIConfigs implements SpiderCNIConfigInterface +type spiderCNIConfigs struct { + *gentype.ClientWithList[*spiderpoolspidernetiov2beta1.SpiderCNIConfig, *spiderpoolspidernetiov2beta1.SpiderCNIConfigList] +} + +// newSpiderCNIConfigs returns a SpiderCNIConfigs +func newSpiderCNIConfigs(c *SpiderpoolV2beta1Client, namespace string) *spiderCNIConfigs { + return &spiderCNIConfigs{ + gentype.NewClientWithList[*spiderpoolspidernetiov2beta1.SpiderCNIConfig, *spiderpoolspidernetiov2beta1.SpiderCNIConfigList]( + "spidercniconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *spiderpoolspidernetiov2beta1.SpiderCNIConfig { + return &spiderpoolspidernetiov2beta1.SpiderCNIConfig{} + }, + func() *spiderpoolspidernetiov2beta1.SpiderCNIConfigList { + return &spiderpoolspidernetiov2beta1.SpiderCNIConfigList{} + }, + ), + } +} diff --git 
a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercoordinator.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercoordinator.go index 503e0b8942..beceaecafc 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercoordinator.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidercoordinator.go @@ -6,15 +6,14 @@ package v2beta1 import ( - "context" - "time" + context "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // SpiderCoordinatorsGetter has a method to return a SpiderCoordinatorInterface. @@ -25,147 +24,38 @@ type SpiderCoordinatorsGetter interface { // SpiderCoordinatorInterface has methods to work with SpiderCoordinator resources. 
type SpiderCoordinatorInterface interface { - Create(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.CreateOptions) (*v2beta1.SpiderCoordinator, error) - Update(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (*v2beta1.SpiderCoordinator, error) - UpdateStatus(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (*v2beta1.SpiderCoordinator, error) + Create(ctx context.Context, spiderCoordinator *spiderpoolspidernetiov2beta1.SpiderCoordinator, opts v1.CreateOptions) (*spiderpoolspidernetiov2beta1.SpiderCoordinator, error) + Update(ctx context.Context, spiderCoordinator *spiderpoolspidernetiov2beta1.SpiderCoordinator, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderCoordinator, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, spiderCoordinator *spiderpoolspidernetiov2beta1.SpiderCoordinator, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderCoordinator, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.SpiderCoordinator, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.SpiderCoordinatorList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*spiderpoolspidernetiov2beta1.SpiderCoordinator, error) + List(ctx context.Context, opts v1.ListOptions) (*spiderpoolspidernetiov2beta1.SpiderCoordinatorList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderCoordinator, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, 
subresources ...string) (result *spiderpoolspidernetiov2beta1.SpiderCoordinator, err error) SpiderCoordinatorExpansion } // spiderCoordinators implements SpiderCoordinatorInterface type spiderCoordinators struct { - client rest.Interface + *gentype.ClientWithList[*spiderpoolspidernetiov2beta1.SpiderCoordinator, *spiderpoolspidernetiov2beta1.SpiderCoordinatorList] } // newSpiderCoordinators returns a SpiderCoordinators func newSpiderCoordinators(c *SpiderpoolV2beta1Client) *spiderCoordinators { return &spiderCoordinators{ - client: c.RESTClient(), + gentype.NewClientWithList[*spiderpoolspidernetiov2beta1.SpiderCoordinator, *spiderpoolspidernetiov2beta1.SpiderCoordinatorList]( + "spidercoordinators", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *spiderpoolspidernetiov2beta1.SpiderCoordinator { + return &spiderpoolspidernetiov2beta1.SpiderCoordinator{} + }, + func() *spiderpoolspidernetiov2beta1.SpiderCoordinatorList { + return &spiderpoolspidernetiov2beta1.SpiderCoordinatorList{} + }, + ), } } - -// Get takes name of the spiderCoordinator, and returns the corresponding spiderCoordinator object, and an error if there is any. -func (c *spiderCoordinators) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderCoordinator, err error) { - result = &v2beta1.SpiderCoordinator{} - err = c.client.Get(). - Resource("spidercoordinators"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of SpiderCoordinators that match those selectors. -func (c *spiderCoordinators) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderCoordinatorList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.SpiderCoordinatorList{} - err = c.client.Get(). - Resource("spidercoordinators"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested spiderCoordinators. -func (c *spiderCoordinators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("spidercoordinators"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a spiderCoordinator and creates it. Returns the server's representation of the spiderCoordinator, and an error, if there is any. -func (c *spiderCoordinators) Create(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.CreateOptions) (result *v2beta1.SpiderCoordinator, err error) { - result = &v2beta1.SpiderCoordinator{} - err = c.client.Post(). - Resource("spidercoordinators"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderCoordinator). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a spiderCoordinator and updates it. Returns the server's representation of the spiderCoordinator, and an error, if there is any. -func (c *spiderCoordinators) Update(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (result *v2beta1.SpiderCoordinator, err error) { - result = &v2beta1.SpiderCoordinator{} - err = c.client.Put(). - Resource("spidercoordinators"). - Name(spiderCoordinator.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderCoordinator). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *spiderCoordinators) UpdateStatus(ctx context.Context, spiderCoordinator *v2beta1.SpiderCoordinator, opts v1.UpdateOptions) (result *v2beta1.SpiderCoordinator, err error) { - result = &v2beta1.SpiderCoordinator{} - err = c.client.Put(). - Resource("spidercoordinators"). - Name(spiderCoordinator.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderCoordinator). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the spiderCoordinator and deletes it. Returns an error if one occurs. -func (c *spiderCoordinators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("spidercoordinators"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *spiderCoordinators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("spidercoordinators"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched spiderCoordinator. -func (c *spiderCoordinators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderCoordinator, err error) { - result = &v2beta1.SpiderCoordinator{} - err = c.client.Patch(pt). - Resource("spidercoordinators"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderippool.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderippool.go index 12cd9dba4f..6f60c83d8c 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderippool.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderippool.go @@ -6,15 +6,14 @@ package v2beta1 import ( - "context" - "time" + context "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // SpiderIPPoolsGetter has a method to return a SpiderIPPoolInterface. @@ -25,147 +24,36 @@ type SpiderIPPoolsGetter interface { // SpiderIPPoolInterface has methods to work with SpiderIPPool resources. 
type SpiderIPPoolInterface interface { - Create(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.CreateOptions) (*v2beta1.SpiderIPPool, error) - Update(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (*v2beta1.SpiderIPPool, error) - UpdateStatus(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (*v2beta1.SpiderIPPool, error) + Create(ctx context.Context, spiderIPPool *spiderpoolspidernetiov2beta1.SpiderIPPool, opts v1.CreateOptions) (*spiderpoolspidernetiov2beta1.SpiderIPPool, error) + Update(ctx context.Context, spiderIPPool *spiderpoolspidernetiov2beta1.SpiderIPPool, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderIPPool, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, spiderIPPool *spiderpoolspidernetiov2beta1.SpiderIPPool, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderIPPool, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.SpiderIPPool, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.SpiderIPPoolList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*spiderpoolspidernetiov2beta1.SpiderIPPool, error) + List(ctx context.Context, opts v1.ListOptions) (*spiderpoolspidernetiov2beta1.SpiderIPPoolList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderIPPool, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *spiderpoolspidernetiov2beta1.SpiderIPPool, err error) SpiderIPPoolExpansion } // 
spiderIPPools implements SpiderIPPoolInterface type spiderIPPools struct { - client rest.Interface + *gentype.ClientWithList[*spiderpoolspidernetiov2beta1.SpiderIPPool, *spiderpoolspidernetiov2beta1.SpiderIPPoolList] } // newSpiderIPPools returns a SpiderIPPools func newSpiderIPPools(c *SpiderpoolV2beta1Client) *spiderIPPools { return &spiderIPPools{ - client: c.RESTClient(), + gentype.NewClientWithList[*spiderpoolspidernetiov2beta1.SpiderIPPool, *spiderpoolspidernetiov2beta1.SpiderIPPoolList]( + "spiderippools", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *spiderpoolspidernetiov2beta1.SpiderIPPool { return &spiderpoolspidernetiov2beta1.SpiderIPPool{} }, + func() *spiderpoolspidernetiov2beta1.SpiderIPPoolList { + return &spiderpoolspidernetiov2beta1.SpiderIPPoolList{} + }, + ), } } - -// Get takes name of the spiderIPPool, and returns the corresponding spiderIPPool object, and an error if there is any. -func (c *spiderIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderIPPool, err error) { - result = &v2beta1.SpiderIPPool{} - err = c.client.Get(). - Resource("spiderippools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of SpiderIPPools that match those selectors. -func (c *spiderIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderIPPoolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.SpiderIPPoolList{} - err = c.client.Get(). - Resource("spiderippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested spiderIPPools. 
-func (c *spiderIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("spiderippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a spiderIPPool and creates it. Returns the server's representation of the spiderIPPool, and an error, if there is any. -func (c *spiderIPPools) Create(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.CreateOptions) (result *v2beta1.SpiderIPPool, err error) { - result = &v2beta1.SpiderIPPool{} - err = c.client.Post(). - Resource("spiderippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderIPPool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a spiderIPPool and updates it. Returns the server's representation of the spiderIPPool, and an error, if there is any. -func (c *spiderIPPools) Update(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (result *v2beta1.SpiderIPPool, err error) { - result = &v2beta1.SpiderIPPool{} - err = c.client.Put(). - Resource("spiderippools"). - Name(spiderIPPool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderIPPool). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *spiderIPPools) UpdateStatus(ctx context.Context, spiderIPPool *v2beta1.SpiderIPPool, opts v1.UpdateOptions) (result *v2beta1.SpiderIPPool, err error) { - result = &v2beta1.SpiderIPPool{} - err = c.client.Put(). - Resource("spiderippools"). - Name(spiderIPPool.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderIPPool). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the spiderIPPool and deletes it. Returns an error if one occurs. -func (c *spiderIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("spiderippools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *spiderIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("spiderippools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched spiderIPPool. -func (c *spiderIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderIPPool, err error) { - result = &v2beta1.SpiderIPPool{} - err = c.client.Patch(pt). - Resource("spiderippools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go index e2f54dcc1b..8089847ff0 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go @@ -6,15 +6,14 @@ package v2beta1 import ( - "context" - "time" + context "context" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // SpiderMultusConfigsGetter has a method to return a SpiderMultusConfigInterface. @@ -25,141 +24,36 @@ type SpiderMultusConfigsGetter interface { // SpiderMultusConfigInterface has methods to work with SpiderMultusConfig resources. 
type SpiderMultusConfigInterface interface { - Create(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.CreateOptions) (*v2beta1.SpiderMultusConfig, error) - Update(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.UpdateOptions) (*v2beta1.SpiderMultusConfig, error) + Create(ctx context.Context, spiderMultusConfig *spiderpoolspidernetiov2beta1.SpiderMultusConfig, opts v1.CreateOptions) (*spiderpoolspidernetiov2beta1.SpiderMultusConfig, error) + Update(ctx context.Context, spiderMultusConfig *spiderpoolspidernetiov2beta1.SpiderMultusConfig, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderMultusConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.SpiderMultusConfig, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.SpiderMultusConfigList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*spiderpoolspidernetiov2beta1.SpiderMultusConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*spiderpoolspidernetiov2beta1.SpiderMultusConfigList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderMultusConfig, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *spiderpoolspidernetiov2beta1.SpiderMultusConfig, err error) SpiderMultusConfigExpansion } // spiderMultusConfigs implements SpiderMultusConfigInterface type spiderMultusConfigs struct { - client rest.Interface - ns string + *gentype.ClientWithList[*spiderpoolspidernetiov2beta1.SpiderMultusConfig, *spiderpoolspidernetiov2beta1.SpiderMultusConfigList] } // 
newSpiderMultusConfigs returns a SpiderMultusConfigs func newSpiderMultusConfigs(c *SpiderpoolV2beta1Client, namespace string) *spiderMultusConfigs { return &spiderMultusConfigs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*spiderpoolspidernetiov2beta1.SpiderMultusConfig, *spiderpoolspidernetiov2beta1.SpiderMultusConfigList]( + "spidermultusconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *spiderpoolspidernetiov2beta1.SpiderMultusConfig { + return &spiderpoolspidernetiov2beta1.SpiderMultusConfig{} + }, + func() *spiderpoolspidernetiov2beta1.SpiderMultusConfigList { + return &spiderpoolspidernetiov2beta1.SpiderMultusConfigList{} + }, + ), } } - -// Get takes name of the spiderMultusConfig, and returns the corresponding spiderMultusConfig object, and an error if there is any. -func (c *spiderMultusConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderMultusConfig, err error) { - result = &v2beta1.SpiderMultusConfig{} - err = c.client.Get(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of SpiderMultusConfigs that match those selectors. -func (c *spiderMultusConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderMultusConfigList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.SpiderMultusConfigList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested spiderMultusConfigs. 
-func (c *spiderMultusConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a spiderMultusConfig and creates it. Returns the server's representation of the spiderMultusConfig, and an error, if there is any. -func (c *spiderMultusConfigs) Create(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.CreateOptions) (result *v2beta1.SpiderMultusConfig, err error) { - result = &v2beta1.SpiderMultusConfig{} - err = c.client.Post(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderMultusConfig). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a spiderMultusConfig and updates it. Returns the server's representation of the spiderMultusConfig, and an error, if there is any. -func (c *spiderMultusConfigs) Update(ctx context.Context, spiderMultusConfig *v2beta1.SpiderMultusConfig, opts v1.UpdateOptions) (result *v2beta1.SpiderMultusConfig, err error) { - result = &v2beta1.SpiderMultusConfig{} - err = c.client.Put(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - Name(spiderMultusConfig.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderMultusConfig). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the spiderMultusConfig and deletes it. Returns an error if one occurs. -func (c *spiderMultusConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *spiderMultusConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("spidermultusconfigs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched spiderMultusConfig. -func (c *spiderMultusConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderMultusConfig, err error) { - result = &v2beta1.SpiderMultusConfig{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("spidermultusconfigs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderpool.spidernet.io_client.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderpool.spidernet.io_client.go index d81e8cf56f..4da45f0193 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderpool.spidernet.io_client.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spiderpool.spidernet.io_client.go @@ -6,15 +6,16 @@ package v2beta1 import ( - "net/http" + http "net/http" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) type SpiderpoolV2beta1Interface interface { RESTClient() rest.Interface + SpiderCNIConfigsGetter SpiderCoordinatorsGetter SpiderIPPoolsGetter SpiderMultusConfigsGetter @@ -26,6 +27,10 @@ type SpiderpoolV2beta1Client struct { restClient rest.Interface } +func (c *SpiderpoolV2beta1Client) SpiderCNIConfigs(namespace string) SpiderCNIConfigInterface { + return newSpiderCNIConfigs(c, namespace) +} + func (c *SpiderpoolV2beta1Client) SpiderCoordinators() SpiderCoordinatorInterface { return newSpiderCoordinators(c) } @@ -47,9 +52,7 @@ func (c *SpiderpoolV2beta1Client) SpiderSubnets() SpiderSubnetInterface { // where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*SpiderpoolV2beta1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err @@ -61,9 +64,7 @@ func NewForConfig(c *rest.Config) (*SpiderpoolV2beta1Client, error) { // Note the http client provided takes precedence over the configured transport values. func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SpiderpoolV2beta1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err @@ -86,17 +87,15 @@ func New(c rest.Interface) *SpiderpoolV2beta1Client { return &SpiderpoolV2beta1Client{c} } -func setConfigDefaults(config *rest.Config) error { - gv := v2beta1.SchemeGroupVersion +func setConfigDefaults(config *rest.Config) { + gv := spiderpoolspidernetiov2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - - return nil } // RESTClient returns a RESTClient that is used to communicate diff --git a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidersubnet.go b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidersubnet.go index f129c01d1b..7cfea4e10c 100644 --- a/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidersubnet.go +++ b/pkg/k8s/client/clientset/versioned/typed/spiderpool.spidernet.io/v2beta1/spidersubnet.go @@ -6,15 +6,14 @@ package v2beta1 import ( - "context" - "time" + context "context" - v2beta1 
"github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" scheme "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // SpiderSubnetsGetter has a method to return a SpiderSubnetInterface. @@ -25,147 +24,36 @@ type SpiderSubnetsGetter interface { // SpiderSubnetInterface has methods to work with SpiderSubnet resources. type SpiderSubnetInterface interface { - Create(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.CreateOptions) (*v2beta1.SpiderSubnet, error) - Update(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (*v2beta1.SpiderSubnet, error) - UpdateStatus(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (*v2beta1.SpiderSubnet, error) + Create(ctx context.Context, spiderSubnet *spiderpoolspidernetiov2beta1.SpiderSubnet, opts v1.CreateOptions) (*spiderpoolspidernetiov2beta1.SpiderSubnet, error) + Update(ctx context.Context, spiderSubnet *spiderpoolspidernetiov2beta1.SpiderSubnet, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderSubnet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, spiderSubnet *spiderpoolspidernetiov2beta1.SpiderSubnet, opts v1.UpdateOptions) (*spiderpoolspidernetiov2beta1.SpiderSubnet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.SpiderSubnet, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.SpiderSubnetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*spiderpoolspidernetiov2beta1.SpiderSubnet, error) + List(ctx context.Context, opts v1.ListOptions) (*spiderpoolspidernetiov2beta1.SpiderSubnetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderSubnet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *spiderpoolspidernetiov2beta1.SpiderSubnet, err error) SpiderSubnetExpansion } // spiderSubnets implements SpiderSubnetInterface type spiderSubnets struct { - client rest.Interface + *gentype.ClientWithList[*spiderpoolspidernetiov2beta1.SpiderSubnet, *spiderpoolspidernetiov2beta1.SpiderSubnetList] } // newSpiderSubnets returns a SpiderSubnets func newSpiderSubnets(c *SpiderpoolV2beta1Client) *spiderSubnets { return &spiderSubnets{ - client: c.RESTClient(), + gentype.NewClientWithList[*spiderpoolspidernetiov2beta1.SpiderSubnet, *spiderpoolspidernetiov2beta1.SpiderSubnetList]( + "spidersubnets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *spiderpoolspidernetiov2beta1.SpiderSubnet { return &spiderpoolspidernetiov2beta1.SpiderSubnet{} }, + func() *spiderpoolspidernetiov2beta1.SpiderSubnetList { + return &spiderpoolspidernetiov2beta1.SpiderSubnetList{} + }, + ), } } - -// Get takes name of 
the spiderSubnet, and returns the corresponding spiderSubnet object, and an error if there is any. -func (c *spiderSubnets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.SpiderSubnet, err error) { - result = &v2beta1.SpiderSubnet{} - err = c.client.Get(). - Resource("spidersubnets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of SpiderSubnets that match those selectors. -func (c *spiderSubnets) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.SpiderSubnetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.SpiderSubnetList{} - err = c.client.Get(). - Resource("spidersubnets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested spiderSubnets. -func (c *spiderSubnets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("spidersubnets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a spiderSubnet and creates it. Returns the server's representation of the spiderSubnet, and an error, if there is any. -func (c *spiderSubnets) Create(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.CreateOptions) (result *v2beta1.SpiderSubnet, err error) { - result = &v2beta1.SpiderSubnet{} - err = c.client.Post(). - Resource("spidersubnets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderSubnet). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a spiderSubnet and updates it. Returns the server's representation of the spiderSubnet, and an error, if there is any. -func (c *spiderSubnets) Update(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (result *v2beta1.SpiderSubnet, err error) { - result = &v2beta1.SpiderSubnet{} - err = c.client.Put(). - Resource("spidersubnets"). - Name(spiderSubnet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderSubnet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *spiderSubnets) UpdateStatus(ctx context.Context, spiderSubnet *v2beta1.SpiderSubnet, opts v1.UpdateOptions) (result *v2beta1.SpiderSubnet, err error) { - result = &v2beta1.SpiderSubnet{} - err = c.client.Put(). - Resource("spidersubnets"). - Name(spiderSubnet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(spiderSubnet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the spiderSubnet and deletes it. Returns an error if one occurs. -func (c *spiderSubnets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("spidersubnets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *spiderSubnets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("spidersubnets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched spiderSubnet. 
-func (c *spiderSubnets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.SpiderSubnet, err error) { - result = &v2beta1.SpiderSubnet{} - err = c.client.Patch(pt). - Resource("spidersubnets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/k8s/client/informers/externalversions/factory.go b/pkg/k8s/client/informers/externalversions/factory.go index 7b42a53bc8..d1c3f4f953 100644 --- a/pkg/k8s/client/informers/externalversions/factory.go +++ b/pkg/k8s/client/informers/externalversions/factory.go @@ -84,6 +84,7 @@ func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Dur // NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. // Listers obtained via this SharedInformerFactory will be subject to the same filters // as specified here. +// // Deprecated: Please use NewSharedInformerFactoryWithOptions instead func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) @@ -191,7 +192,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // // It is typically used like this: // -// ctx, cancel := context.Background() +// ctx, cancel := context.WithCancel(context.Background()) // defer cancel() // factory := NewSharedInformerFactory(client, resyncPeriod) // defer factory.WaitForStop() // Returns immediately if nothing was started. @@ -215,6 +216,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. 
+ // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/pkg/k8s/client/informers/externalversions/generic.go b/pkg/k8s/client/informers/externalversions/generic.go index c7cc59245c..373fcf52aa 100644 --- a/pkg/k8s/client/informers/externalversions/generic.go +++ b/pkg/k8s/client/informers/externalversions/generic.go @@ -6,7 +6,7 @@ package externalversions import ( - "fmt" + fmt "fmt" v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -40,6 +40,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=spiderpool.spidernet.io, Version=v2beta1 + case v2beta1.SchemeGroupVersion.WithResource("spidercniconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Spiderpool().V2beta1().SpiderCNIConfigs().Informer()}, nil case v2beta1.SchemeGroupVersion.WithResource("spidercoordinators"): return &genericInformer{resource: resource.GroupResource(), informer: f.Spiderpool().V2beta1().SpiderCoordinators().Informer()}, nil case v2beta1.SchemeGroupVersion.WithResource("spiderippools"): diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/interface.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/interface.go index 0e396d97ad..52f60856a6 100644 --- a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/interface.go +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/interface.go @@ -11,6 +11,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // SpiderCNIConfigs returns a SpiderCNIConfigInformer. 
+ SpiderCNIConfigs() SpiderCNIConfigInformer // SpiderCoordinators returns a SpiderCoordinatorInformer. SpiderCoordinators() SpiderCoordinatorInformer // SpiderIPPools returns a SpiderIPPoolInformer. @@ -32,6 +34,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// SpiderCNIConfigs returns a SpiderCNIConfigInformer. +func (v *version) SpiderCNIConfigs() SpiderCNIConfigInformer { + return &spiderCNIConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // SpiderCoordinators returns a SpiderCoordinatorInformer. func (v *version) SpiderCoordinators() SpiderCoordinatorInformer { return &spiderCoordinatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercniconfig.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercniconfig.go new file mode 100644 index 0000000000..41fb1d3e7f --- /dev/null +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercniconfig.go @@ -0,0 +1,89 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v2beta1 + +import ( + context "context" + time "time" + + apisspiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + versioned "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/spidernet-io/spiderpool/pkg/k8s/client/informers/externalversions/internalinterfaces" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// SpiderCNIConfigInformer provides access to a shared informer and lister for +// SpiderCNIConfigs. +type SpiderCNIConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() spiderpoolspidernetiov2beta1.SpiderCNIConfigLister +} + +type spiderCNIConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSpiderCNIConfigInformer constructs a new informer for SpiderCNIConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSpiderCNIConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSpiderCNIConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSpiderCNIConfigInformer constructs a new informer for SpiderCNIConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredSpiderCNIConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCNIConfigs(namespace).List(context.Background(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCNIConfigs(namespace).Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCNIConfigs(namespace).List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCNIConfigs(namespace).Watch(ctx, options) + }, + }, client), + &apisspiderpoolspidernetiov2beta1.SpiderCNIConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *spiderCNIConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSpiderCNIConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *spiderCNIConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisspiderpoolspidernetiov2beta1.SpiderCNIConfig{}, f.defaultInformer) +} + +func (f *spiderCNIConfigInformer) Lister() spiderpoolspidernetiov2beta1.SpiderCNIConfigLister { 
+ return spiderpoolspidernetiov2beta1.NewSpiderCNIConfigLister(f.Informer().GetIndexer()) +} diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercoordinator.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercoordinator.go index 2d70a79d72..92b28d550e 100644 --- a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercoordinator.go +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidercoordinator.go @@ -6,13 +6,13 @@ package v2beta1 import ( - "context" + context "context" time "time" - spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + apisspiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" versioned "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/spidernet-io/spiderpool/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // SpiderCoordinators. type SpiderCoordinatorInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.SpiderCoordinatorLister + Lister() spiderpoolspidernetiov2beta1.SpiderCoordinatorLister } type spiderCoordinatorInformer struct { @@ -43,21 +43,33 @@ func NewSpiderCoordinatorInformer(client versioned.Interface, resyncPeriod time. // one. This reduces memory footprint and number of connections to the server. 
func NewFilteredSpiderCoordinatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( - &cache.ListWatch{ + cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderCoordinators().List(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderCoordinators().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderCoordinators().Watch(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderCoordinators().Watch(context.Background(), options) }, - }, - &spiderpoolspidernetiov2beta1.SpiderCoordinator{}, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCoordinators().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderCoordinators().Watch(ctx, options) + }, + }, client), + &apisspiderpoolspidernetiov2beta1.SpiderCoordinator{}, resyncPeriod, indexers, ) @@ -68,9 +80,9 @@ func (f *spiderCoordinatorInformer) defaultInformer(client versioned.Interface, } func (f *spiderCoordinatorInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&spiderpoolspidernetiov2beta1.SpiderCoordinator{}, f.defaultInformer) + return f.factory.InformerFor(&apisspiderpoolspidernetiov2beta1.SpiderCoordinator{}, f.defaultInformer) } -func (f 
*spiderCoordinatorInformer) Lister() v2beta1.SpiderCoordinatorLister { - return v2beta1.NewSpiderCoordinatorLister(f.Informer().GetIndexer()) +func (f *spiderCoordinatorInformer) Lister() spiderpoolspidernetiov2beta1.SpiderCoordinatorLister { + return spiderpoolspidernetiov2beta1.NewSpiderCoordinatorLister(f.Informer().GetIndexer()) } diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spiderippool.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spiderippool.go index 630b842d29..709002bb01 100644 --- a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spiderippool.go +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spiderippool.go @@ -6,13 +6,13 @@ package v2beta1 import ( - "context" + context "context" time "time" - spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + apisspiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" versioned "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/spidernet-io/spiderpool/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // SpiderIPPools. type SpiderIPPoolInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.SpiderIPPoolLister + Lister() spiderpoolspidernetiov2beta1.SpiderIPPoolLister } type spiderIPPoolInformer struct { @@ -43,21 +43,33 @@ func NewSpiderIPPoolInformer(client versioned.Interface, resyncPeriod time.Durat // one. 
This reduces memory footprint and number of connections to the server. func NewFilteredSpiderIPPoolInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( - &cache.ListWatch{ + cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderIPPools().List(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderIPPools().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderIPPools().Watch(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderIPPools().Watch(context.Background(), options) }, - }, - &spiderpoolspidernetiov2beta1.SpiderIPPool{}, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderIPPools().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderIPPools().Watch(ctx, options) + }, + }, client), + &apisspiderpoolspidernetiov2beta1.SpiderIPPool{}, resyncPeriod, indexers, ) @@ -68,9 +80,9 @@ func (f *spiderIPPoolInformer) defaultInformer(client versioned.Interface, resyn } func (f *spiderIPPoolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&spiderpoolspidernetiov2beta1.SpiderIPPool{}, f.defaultInformer) + return f.factory.InformerFor(&apisspiderpoolspidernetiov2beta1.SpiderIPPool{}, f.defaultInformer) } -func (f 
*spiderIPPoolInformer) Lister() v2beta1.SpiderIPPoolLister { - return v2beta1.NewSpiderIPPoolLister(f.Informer().GetIndexer()) +func (f *spiderIPPoolInformer) Lister() spiderpoolspidernetiov2beta1.SpiderIPPoolLister { + return spiderpoolspidernetiov2beta1.NewSpiderIPPoolLister(f.Informer().GetIndexer()) } diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go index 6bb7ba8a9d..4a15155678 100644 --- a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go @@ -6,13 +6,13 @@ package v2beta1 import ( - "context" + context "context" time "time" - spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + apisspiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" versioned "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/spidernet-io/spiderpool/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // SpiderMultusConfigs. 
type SpiderMultusConfigInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.SpiderMultusConfigLister + Lister() spiderpoolspidernetiov2beta1.SpiderMultusConfigLister } type spiderMultusConfigInformer struct { @@ -44,21 +44,33 @@ func NewSpiderMultusConfigInformer(client versioned.Interface, namespace string, // one. This reduces memory footprint and number of connections to the server. func NewFilteredSpiderMultusConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( - &cache.ListWatch{ + cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).List(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).Watch(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).Watch(context.Background(), options) }, - }, - &spiderpoolspidernetiov2beta1.SpiderMultusConfig{}, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderMultusConfigs(namespace).Watch(ctx, options) + }, + }, client), 
+ &apisspiderpoolspidernetiov2beta1.SpiderMultusConfig{}, resyncPeriod, indexers, ) @@ -69,9 +81,9 @@ func (f *spiderMultusConfigInformer) defaultInformer(client versioned.Interface, } func (f *spiderMultusConfigInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&spiderpoolspidernetiov2beta1.SpiderMultusConfig{}, f.defaultInformer) + return f.factory.InformerFor(&apisspiderpoolspidernetiov2beta1.SpiderMultusConfig{}, f.defaultInformer) } -func (f *spiderMultusConfigInformer) Lister() v2beta1.SpiderMultusConfigLister { - return v2beta1.NewSpiderMultusConfigLister(f.Informer().GetIndexer()) +func (f *spiderMultusConfigInformer) Lister() spiderpoolspidernetiov2beta1.SpiderMultusConfigLister { + return spiderpoolspidernetiov2beta1.NewSpiderMultusConfigLister(f.Informer().GetIndexer()) } diff --git a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidersubnet.go b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidersubnet.go index b88eff8a58..18bafba745 100644 --- a/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidersubnet.go +++ b/pkg/k8s/client/informers/externalversions/spiderpool.spidernet.io/v2beta1/spidersubnet.go @@ -6,13 +6,13 @@ package v2beta1 import ( - "context" + context "context" time "time" - spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + apisspiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" versioned "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/spidernet-io/spiderpool/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1" v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // SpiderSubnets. type SpiderSubnetInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.SpiderSubnetLister + Lister() spiderpoolspidernetiov2beta1.SpiderSubnetLister } type spiderSubnetInformer struct { @@ -43,21 +43,33 @@ func NewSpiderSubnetInformer(client versioned.Interface, resyncPeriod time.Durat // one. This reduces memory footprint and number of connections to the server. func NewFilteredSpiderSubnetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( - &cache.ListWatch{ + cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderSubnets().List(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderSubnets().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SpiderpoolV2beta1().SpiderSubnets().Watch(context.TODO(), options) + return client.SpiderpoolV2beta1().SpiderSubnets().Watch(context.Background(), options) }, - }, - &spiderpoolspidernetiov2beta1.SpiderSubnet{}, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderSubnets().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SpiderpoolV2beta1().SpiderSubnets().Watch(ctx, 
options) + }, + }, client), + &apisspiderpoolspidernetiov2beta1.SpiderSubnet{}, resyncPeriod, indexers, ) @@ -68,9 +80,9 @@ func (f *spiderSubnetInformer) defaultInformer(client versioned.Interface, resyn } func (f *spiderSubnetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&spiderpoolspidernetiov2beta1.SpiderSubnet{}, f.defaultInformer) + return f.factory.InformerFor(&apisspiderpoolspidernetiov2beta1.SpiderSubnet{}, f.defaultInformer) } -func (f *spiderSubnetInformer) Lister() v2beta1.SpiderSubnetLister { - return v2beta1.NewSpiderSubnetLister(f.Informer().GetIndexer()) +func (f *spiderSubnetInformer) Lister() spiderpoolspidernetiov2beta1.SpiderSubnetLister { + return spiderpoolspidernetiov2beta1.NewSpiderSubnetLister(f.Informer().GetIndexer()) } diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/expansion_generated.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/expansion_generated.go index cc5d1e5e04..2c8cb38370 100644 --- a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/expansion_generated.go +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/expansion_generated.go @@ -5,6 +5,14 @@ package v2beta1 +// SpiderCNIConfigListerExpansion allows custom methods to be added to +// SpiderCNIConfigLister. +type SpiderCNIConfigListerExpansion interface{} + +// SpiderCNIConfigNamespaceListerExpansion allows custom methods to be added to +// SpiderCNIConfigNamespaceLister. +type SpiderCNIConfigNamespaceListerExpansion interface{} + // SpiderCoordinatorListerExpansion allows custom methods to be added to // SpiderCoordinatorLister. 
type SpiderCoordinatorListerExpansion interface{} diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercniconfig.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercniconfig.go new file mode 100644 index 0000000000..fe6de062be --- /dev/null +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercniconfig.go @@ -0,0 +1,57 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by lister-gen. DO NOT EDIT. + +package v2beta1 + +import ( + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// SpiderCNIConfigLister helps list SpiderCNIConfigs. +// All objects returned here must be treated as read-only. +type SpiderCNIConfigLister interface { + // List lists all SpiderCNIConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderCNIConfig, err error) + // SpiderCNIConfigs returns an object that can list and get SpiderCNIConfigs. + SpiderCNIConfigs(namespace string) SpiderCNIConfigNamespaceLister + SpiderCNIConfigListerExpansion +} + +// spiderCNIConfigLister implements the SpiderCNIConfigLister interface. +type spiderCNIConfigLister struct { + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderCNIConfig] +} + +// NewSpiderCNIConfigLister returns a new SpiderCNIConfigLister. +func NewSpiderCNIConfigLister(indexer cache.Indexer) SpiderCNIConfigLister { + return &spiderCNIConfigLister{listers.New[*spiderpoolspidernetiov2beta1.SpiderCNIConfig](indexer, spiderpoolspidernetiov2beta1.Resource("spidercniconfig"))} +} + +// SpiderCNIConfigs returns an object that can list and get SpiderCNIConfigs. 
+func (s *spiderCNIConfigLister) SpiderCNIConfigs(namespace string) SpiderCNIConfigNamespaceLister { + return spiderCNIConfigNamespaceLister{listers.NewNamespaced[*spiderpoolspidernetiov2beta1.SpiderCNIConfig](s.ResourceIndexer, namespace)} +} + +// SpiderCNIConfigNamespaceLister helps list and get SpiderCNIConfigs. +// All objects returned here must be treated as read-only. +type SpiderCNIConfigNamespaceLister interface { + // List lists all SpiderCNIConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderCNIConfig, err error) + // Get retrieves the SpiderCNIConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*spiderpoolspidernetiov2beta1.SpiderCNIConfig, error) + SpiderCNIConfigNamespaceListerExpansion +} + +// spiderCNIConfigNamespaceLister implements the SpiderCNIConfigNamespaceLister +// interface. 
+type spiderCNIConfigNamespaceLister struct { + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderCNIConfig] +} diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercoordinator.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercoordinator.go index 096a3991f8..493bb39713 100644 --- a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercoordinator.go +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidercoordinator.go @@ -6,10 +6,10 @@ package v2beta1 import ( - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SpiderCoordinatorLister helps list SpiderCoordinators. @@ -17,39 +17,19 @@ import ( type SpiderCoordinatorLister interface { // List lists all SpiderCoordinators in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.SpiderCoordinator, err error) + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderCoordinator, err error) // Get retrieves the SpiderCoordinator from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.SpiderCoordinator, error) + Get(name string) (*spiderpoolspidernetiov2beta1.SpiderCoordinator, error) SpiderCoordinatorListerExpansion } // spiderCoordinatorLister implements the SpiderCoordinatorLister interface. type spiderCoordinatorLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderCoordinator] } // NewSpiderCoordinatorLister returns a new SpiderCoordinatorLister. 
func NewSpiderCoordinatorLister(indexer cache.Indexer) SpiderCoordinatorLister { - return &spiderCoordinatorLister{indexer: indexer} -} - -// List lists all SpiderCoordinators in the indexer. -func (s *spiderCoordinatorLister) List(selector labels.Selector) (ret []*v2beta1.SpiderCoordinator, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.SpiderCoordinator)) - }) - return ret, err -} - -// Get retrieves the SpiderCoordinator from the index for a given name. -func (s *spiderCoordinatorLister) Get(name string) (*v2beta1.SpiderCoordinator, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("spidercoordinator"), name) - } - return obj.(*v2beta1.SpiderCoordinator), nil + return &spiderCoordinatorLister{listers.New[*spiderpoolspidernetiov2beta1.SpiderCoordinator](indexer, spiderpoolspidernetiov2beta1.Resource("spidercoordinator"))} } diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spiderippool.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spiderippool.go index 63c665705c..b371da7a47 100644 --- a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spiderippool.go +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spiderippool.go @@ -6,10 +6,10 @@ package v2beta1 import ( - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SpiderIPPoolLister helps list SpiderIPPools. @@ -17,39 +17,19 @@ import ( type SpiderIPPoolLister interface { // List lists all SpiderIPPools in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.SpiderIPPool, err error) + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderIPPool, err error) // Get retrieves the SpiderIPPool from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.SpiderIPPool, error) + Get(name string) (*spiderpoolspidernetiov2beta1.SpiderIPPool, error) SpiderIPPoolListerExpansion } // spiderIPPoolLister implements the SpiderIPPoolLister interface. type spiderIPPoolLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderIPPool] } // NewSpiderIPPoolLister returns a new SpiderIPPoolLister. func NewSpiderIPPoolLister(indexer cache.Indexer) SpiderIPPoolLister { - return &spiderIPPoolLister{indexer: indexer} -} - -// List lists all SpiderIPPools in the indexer. -func (s *spiderIPPoolLister) List(selector labels.Selector) (ret []*v2beta1.SpiderIPPool, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.SpiderIPPool)) - }) - return ret, err -} - -// Get retrieves the SpiderIPPool from the index for a given name. 
-func (s *spiderIPPoolLister) Get(name string) (*v2beta1.SpiderIPPool, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("spiderippool"), name) - } - return obj.(*v2beta1.SpiderIPPool), nil + return &spiderIPPoolLister{listers.New[*spiderpoolspidernetiov2beta1.SpiderIPPool](indexer, spiderpoolspidernetiov2beta1.Resource("spiderippool"))} } diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go index 0bd51dd048..1ceab05ebd 100644 --- a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidermultusconfig.go @@ -6,10 +6,10 @@ package v2beta1 import ( - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SpiderMultusConfigLister helps list SpiderMultusConfigs. @@ -17,7 +17,7 @@ import ( type SpiderMultusConfigLister interface { // List lists all SpiderMultusConfigs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.SpiderMultusConfig, err error) + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderMultusConfig, err error) // SpiderMultusConfigs returns an object that can list and get SpiderMultusConfigs. 
SpiderMultusConfigs(namespace string) SpiderMultusConfigNamespaceLister SpiderMultusConfigListerExpansion @@ -25,25 +25,17 @@ type SpiderMultusConfigLister interface { // spiderMultusConfigLister implements the SpiderMultusConfigLister interface. type spiderMultusConfigLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderMultusConfig] } // NewSpiderMultusConfigLister returns a new SpiderMultusConfigLister. func NewSpiderMultusConfigLister(indexer cache.Indexer) SpiderMultusConfigLister { - return &spiderMultusConfigLister{indexer: indexer} -} - -// List lists all SpiderMultusConfigs in the indexer. -func (s *spiderMultusConfigLister) List(selector labels.Selector) (ret []*v2beta1.SpiderMultusConfig, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.SpiderMultusConfig)) - }) - return ret, err + return &spiderMultusConfigLister{listers.New[*spiderpoolspidernetiov2beta1.SpiderMultusConfig](indexer, spiderpoolspidernetiov2beta1.Resource("spidermultusconfig"))} } // SpiderMultusConfigs returns an object that can list and get SpiderMultusConfigs. func (s *spiderMultusConfigLister) SpiderMultusConfigs(namespace string) SpiderMultusConfigNamespaceLister { - return spiderMultusConfigNamespaceLister{indexer: s.indexer, namespace: namespace} + return spiderMultusConfigNamespaceLister{listers.NewNamespaced[*spiderpoolspidernetiov2beta1.SpiderMultusConfig](s.ResourceIndexer, namespace)} } // SpiderMultusConfigNamespaceLister helps list and get SpiderMultusConfigs. @@ -51,36 +43,15 @@ func (s *spiderMultusConfigLister) SpiderMultusConfigs(namespace string) SpiderM type SpiderMultusConfigNamespaceLister interface { // List lists all SpiderMultusConfigs in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v2beta1.SpiderMultusConfig, err error) + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderMultusConfig, err error) // Get retrieves the SpiderMultusConfig from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.SpiderMultusConfig, error) + Get(name string) (*spiderpoolspidernetiov2beta1.SpiderMultusConfig, error) SpiderMultusConfigNamespaceListerExpansion } // spiderMultusConfigNamespaceLister implements the SpiderMultusConfigNamespaceLister // interface. type spiderMultusConfigNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all SpiderMultusConfigs in the indexer for a given namespace. -func (s spiderMultusConfigNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.SpiderMultusConfig, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.SpiderMultusConfig)) - }) - return ret, err -} - -// Get retrieves the SpiderMultusConfig from the indexer for a given namespace and name. 
-func (s spiderMultusConfigNamespaceLister) Get(name string) (*v2beta1.SpiderMultusConfig, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("spidermultusconfig"), name) - } - return obj.(*v2beta1.SpiderMultusConfig), nil + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderMultusConfig] } diff --git a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidersubnet.go b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidersubnet.go index ad9a9a768f..60f80bc6a4 100644 --- a/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidersubnet.go +++ b/pkg/k8s/client/listers/spiderpool.spidernet.io/v2beta1/spidersubnet.go @@ -6,10 +6,10 @@ package v2beta1 import ( - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + spiderpoolspidernetiov2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SpiderSubnetLister helps list SpiderSubnets. @@ -17,39 +17,19 @@ import ( type SpiderSubnetLister interface { // List lists all SpiderSubnets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.SpiderSubnet, err error) + List(selector labels.Selector) (ret []*spiderpoolspidernetiov2beta1.SpiderSubnet, err error) // Get retrieves the SpiderSubnet from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.SpiderSubnet, error) + Get(name string) (*spiderpoolspidernetiov2beta1.SpiderSubnet, error) SpiderSubnetListerExpansion } // spiderSubnetLister implements the SpiderSubnetLister interface. 
type spiderSubnetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*spiderpoolspidernetiov2beta1.SpiderSubnet] } // NewSpiderSubnetLister returns a new SpiderSubnetLister. func NewSpiderSubnetLister(indexer cache.Indexer) SpiderSubnetLister { - return &spiderSubnetLister{indexer: indexer} -} - -// List lists all SpiderSubnets in the indexer. -func (s *spiderSubnetLister) List(selector labels.Selector) (ret []*v2beta1.SpiderSubnet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.SpiderSubnet)) - }) - return ret, err -} - -// Get retrieves the SpiderSubnet from the index for a given name. -func (s *spiderSubnetLister) Get(name string) (*v2beta1.SpiderSubnet, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("spidersubnet"), name) - } - return obj.(*v2beta1.SpiderSubnet), nil + return &spiderSubnetLister{listers.New[*spiderpoolspidernetiov2beta1.SpiderSubnet](indexer, spiderpoolspidernetiov2beta1.Resource("spidersubnet"))} } diff --git a/pkg/multuscniconfig/multusconfig_informer.go b/pkg/multuscniconfig/multusconfig_informer.go index 7be8e05f9e..4521c46e21 100644 --- a/pkg/multuscniconfig/multusconfig_informer.go +++ b/pkg/multuscniconfig/multusconfig_informer.go @@ -20,6 +20,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -338,15 +339,29 @@ func (mcc *MultusConfigController) syncHandler(ctx context.Context, multusConfig } func generateNetAttachDef(netAttachName string, multusConf *spiderpoolv2beta1.SpiderMultusConfig) (*netv1.NetworkAttachmentDefinition, error) { - multusConfSpec := multusConf.Spec.DeepCopy() - anno := multusConf.Annotations + if anno == nil { + anno = 
make(map[string]string) + } + return generateNetAttachDefWithSpec(netAttachName, multusConf.Namespace, multusConf.Spec, anno) +} + +func generateNetAttachDefWithSpec(netAttachName, namespace string, multusConfSpec spiderpoolv2beta1.MultusCNIConfigSpec, anno map[string]string) (*netv1.NetworkAttachmentDefinition, error) { + specCopy := multusConfSpec.DeepCopy() + if anno == nil { anno = make(map[string]string) } + if specCopy.CniType == nil { + specCopy.CniType = ptr.To(constant.CustomCNI) + } + if specCopy.EnableCoordinator == nil { + specCopy.EnableCoordinator = ptr.To(true) + } + var plugins []interface{} - for _, cf := range multusConfSpec.ChainCNIJsonData { + for _, cf := range specCopy.ChainCNIJsonData { var plugin interface{} if err := json.Unmarshal([]byte(cf), &plugin); err != nil { return nil, fmt.Errorf("failed to unmarshal chain cni config %s: %v", cf, err) @@ -354,21 +369,19 @@ func generateNetAttachDef(netAttachName string, multusConf *spiderpoolv2beta1.Sp plugins = append(plugins, plugin) } - // with Kubernetes OpenAPI validation, multusConfSpec.EnableCoordinator must not be nil - hasCoordinator := *multusConfSpec.EnableCoordinator + hasCoordinator := specCopy.EnableCoordinator != nil && *specCopy.EnableCoordinator if hasCoordinator { - coordinatorCNIConf := generateCoordinatorCNIConf(multusConfSpec.CoordinatorConfig) + coordinatorCNIConf := generateCoordinatorCNIConf(specCopy.CoordinatorConfig) // head insertion later plugins = append(plugins, coordinatorCNIConf) } disableIPAM := false - if multusConfSpec.DisableIPAM != nil && *multusConfSpec.DisableIPAM { + if specCopy.DisableIPAM != nil && *specCopy.DisableIPAM { disableIPAM = true } // we'll use the default CNI version 0.3.1 if the annotation doesn't have it. - // the annotation custom CNI version is already validated by webhook. 
cniVersion := spiderpoolcmd.CniVersion031 if customCNIVersion, ok := anno[constant.AnnoMultusConfigCNIVersion]; ok { cniVersion = customCNIVersion @@ -381,19 +394,18 @@ func generateNetAttachDef(netAttachName string, multusConf *spiderpoolv2beta1.Sp var confStr string var err error - // with Kubernetes OpenAPI validation, multusConfSpec.CniType must not be nil and default to "custom" - switch *multusConfSpec.CniType { + switch *specCopy.CniType { case constant.MacvlanCNI: - macvlanCNIConf := generateMacvlanCNIConf(disableIPAM, *multusConfSpec) - // head insertion + macvlanCNIConf := generateMacvlanCNIConf(disableIPAM, *specCopy) plugins = append([]interface{}{macvlanCNIConf}, plugins...) - if (multusConfSpec.MacvlanConfig.VlanID != nil && *multusConfSpec.MacvlanConfig.VlanID != 0) || - len(multusConfSpec.MacvlanConfig.Master) >= 2 { - // we need to set Subvlan as first at the CNI plugin chain - subVlanCNIConf := generateIfacer(multusConfSpec.MacvlanConfig.Master, - *multusConfSpec.MacvlanConfig.VlanID, - multusConfSpec.MacvlanConfig.Bond) - plugins = append([]interface{}{subVlanCNIConf}, plugins...) + if specCopy.MacvlanConfig != nil { + if (specCopy.MacvlanConfig.VlanID != nil && *specCopy.MacvlanConfig.VlanID != 0) || + len(specCopy.MacvlanConfig.Master) >= 2 { + subVlanCNIConf := generateIfacer(specCopy.MacvlanConfig.Master, + *specCopy.MacvlanConfig.VlanID, + specCopy.MacvlanConfig.Bond) + plugins = append([]interface{}{subVlanCNIConf}, plugins...) + } } confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { @@ -401,109 +413,91 @@ func generateNetAttachDef(netAttachName string, multusConf *spiderpoolv2beta1.Sp } case constant.IPVlanCNI: - ipvlanCNIConf := generateIPvlanCNIConf(disableIPAM, *multusConfSpec) - // head insertion + ipvlanCNIConf := generateIPvlanCNIConf(disableIPAM, *specCopy) plugins = append([]interface{}{ipvlanCNIConf}, plugins...) 
- if (multusConfSpec.IPVlanConfig.VlanID != nil && *multusConfSpec.IPVlanConfig.VlanID != 0) || - len(multusConfSpec.IPVlanConfig.Master) >= 2 { - // we need to set Subvlan as first at the CNI plugin chain - subVlanCNIConf := generateIfacer(multusConfSpec.IPVlanConfig.Master, - *multusConfSpec.IPVlanConfig.VlanID, - multusConfSpec.IPVlanConfig.Bond) - plugins = append([]interface{}{subVlanCNIConf}, plugins...) + if specCopy.IPVlanConfig != nil { + if (specCopy.IPVlanConfig.VlanID != nil && *specCopy.IPVlanConfig.VlanID != 0) || + len(specCopy.IPVlanConfig.Master) >= 2 { + subVlanCNIConf := generateIfacer(specCopy.IPVlanConfig.Master, + *specCopy.IPVlanConfig.VlanID, + specCopy.IPVlanConfig.Bond) + plugins = append([]interface{}{subVlanCNIConf}, plugins...) + } } - confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { return nil, fmt.Errorf("failed to marshalCniConfig2String: %w", err) } case constant.SriovCNI: - // SRIOV special annotation - anno[constant.ResourceNameAnnot] = *multusConfSpec.SriovConfig.ResourceName - - if multusConfSpec.SriovConfig.RdmaIsolation != nil && *multusConfSpec.SriovConfig.RdmaIsolation { - rdmaconf := RdmaNetConf{ - Type: "rdma", - } + if specCopy.SriovConfig != nil && specCopy.SriovConfig.ResourceName != nil { + anno[constant.ResourceNameAnnot] = *specCopy.SriovConfig.ResourceName + } + if specCopy.SriovConfig != nil && specCopy.SriovConfig.RdmaIsolation != nil && *specCopy.SriovConfig.RdmaIsolation { + rdmaconf := RdmaNetConf{Type: "rdma"} plugins = append([]interface{}{rdmaconf}, plugins...) } - - sriovCNIConf := generateSriovCNIConf(disableIPAM, *multusConfSpec) - // head insertion + sriovCNIConf := generateSriovCNIConf(disableIPAM, *specCopy) plugins = append([]interface{}{sriovCNIConf}, plugins...) 
- - if multusConfSpec.SriovConfig.MTU != nil && *multusConfSpec.SriovConfig.MTU > 0 { - tuningConf := tuningConf{ - Type: "tuning", - Mtu: *multusConfSpec.SriovConfig.MTU, - } - // head insertion + if specCopy.SriovConfig != nil && specCopy.SriovConfig.MTU != nil && *specCopy.SriovConfig.MTU > 0 { + tuningConf := tuningConf{Type: "tuning", Mtu: *specCopy.SriovConfig.MTU} plugins = append(plugins, tuningConf) } - confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { return nil, fmt.Errorf("failed to marshal sriov cniConfig to String: %w", err) } case constant.IBSriovCNI: - // SRIOV special annotation - anno[constant.ResourceNameAnnot] = *multusConfSpec.IbSriovConfig.ResourceName - - cniConf := generateIBSriovCNIConf(disableIPAM, *multusConfSpec) - // head insertion + if specCopy.IbSriovConfig != nil && specCopy.IbSriovConfig.ResourceName != nil { + anno[constant.ResourceNameAnnot] = *specCopy.IbSriovConfig.ResourceName + } + cniConf := generateIBSriovCNIConf(disableIPAM, *specCopy) plugins = append([]interface{}{cniConf}, plugins...) - confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { return nil, fmt.Errorf("failed to marshal ib-sriov cniConfig to String: %w", err) } case constant.IPoIBCNI: - cniConf := generateIpoibCNIConf(disableIPAM, *multusConfSpec) - // head insertion + cniConf := generateIpoibCNIConf(disableIPAM, *specCopy) plugins = append([]interface{}{cniConf}, plugins...) - confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { return nil, fmt.Errorf("failed to marshal ipoib cniConfig to String: %w", err) } case constant.OvsCNI: - ovsConf := generateOvsCNIConf(disableIPAM, multusConfSpec) + ovsConf := generateOvsCNIConf(disableIPAM, specCopy) plugins = append([]interface{}{ovsConf}, plugins...) 
confStr, err = marshalCniConfig2String(cniConfigName, cniVersion, plugins) if err != nil { return nil, fmt.Errorf("failed to marshal ovs cniConfig to String: %w", err) } - if multusConfSpec.OvsConfig.DeviceID != "" { - anno[constant.ResourceNameAnnot] = fmt.Sprintf("%s/%s", constant.ResourceNameOvsCniValue, multusConfSpec.OvsConfig.BrName) + if specCopy.OvsConfig != nil && specCopy.OvsConfig.DeviceID != "" { + anno[constant.ResourceNameAnnot] = fmt.Sprintf("%s/%s", constant.ResourceNameOvsCniValue, specCopy.OvsConfig.BrName) } case constant.CustomCNI: - if multusConfSpec.CustomCNIConfig != nil && len(*multusConfSpec.CustomCNIConfig) > 0 { - if !json.Valid([]byte(*multusConfSpec.CustomCNIConfig)) { + if specCopy.CustomCNIConfig != nil && len(*specCopy.CustomCNIConfig) > 0 { + if !json.Valid([]byte(*specCopy.CustomCNIConfig)) { return nil, fmt.Errorf("customCniConfig isn't a valid JSON encoding") } - confStr = *multusConfSpec.CustomCNIConfig + confStr = *specCopy.CustomCNIConfig } default: - // It's impossible get into the default branch - return nil, fmt.Errorf("%w: unrecognized CNI type %s", constant.ErrWrongInput, *multusConfSpec.CniType) + return nil, fmt.Errorf("%w: unrecognized CNI type %s", constant.ErrWrongInput, *specCopy.CniType) } netAttachDef := &netv1.NetworkAttachmentDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: netAttachName, - Namespace: multusConf.Namespace, + Namespace: namespace, Annotations: anno, }, } if len(confStr) > 0 { - netAttachDef.Spec = netv1.NetworkAttachmentDefinitionSpec{ - Config: confStr, - } + netAttachDef.Spec = netv1.NetworkAttachmentDefinitionSpec{Config: confStr} } return netAttachDef, nil } diff --git a/pkg/multuscniconfig/spidercniconfig_controller.go b/pkg/multuscniconfig/spidercniconfig_controller.go new file mode 100644 index 0000000000..5c26e8a96d --- /dev/null +++ b/pkg/multuscniconfig/spidercniconfig_controller.go @@ -0,0 +1,164 @@ +// Copyright 2025 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + 
+package multuscniconfig + +import ( + "context" + "fmt" + "reflect" + "time" + + netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "go.uber.org/zap" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ktypes "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/election" + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/pkg/logutils" + "github.com/spidernet-io/spiderpool/pkg/utils" +) + +var spiderCNIConfigController controller.Controller + +func SetupSpiderCNIConfigController(mgr ctrl.Manager, leader election.SpiderLeaseElector) error { + if mgr == nil { + return fmt.Errorf("controller-runtime manager %w", constant.ErrMissingRequiredParam) + } + + r := &spiderCNIConfigReconciler{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + leader: leader, + targetNamespace: utils.GetAgentNamespace(), + logger: logutils.Logger.Named("SpiderCNIConfig-Controller"), + } + + var err error + if spiderCNIConfigController == nil { + spiderCNIConfigController, err = controller.New(constant.KindSpiderCNIConfig, mgr, controller.Options{Reconciler: r, SkipNameValidation: ptr.To(true)}) + if err != nil { + return err + } + } + + if err := spiderCNIConfigController.Watch( + source.Kind[*spiderpoolv2beta1.SpiderCNIConfig]( + mgr.GetCache(), + &spiderpoolv2beta1.SpiderCNIConfig{}, + &handler.TypedEnqueueRequestForObject[*spiderpoolv2beta1.SpiderCNIConfig]{}, + ), + ); err != 
nil { + return err + } + + return nil +} + +type spiderCNIConfigReconciler struct { + client client.Client + scheme *runtime.Scheme + leader election.SpiderLeaseElector + targetNamespace string + logger *zap.Logger +} + +func (r *spiderCNIConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + if r.leader != nil && !r.leader.IsElected() { + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + log := r.logger.With(zap.String("SpiderCNIConfig", req.Name)) + + cnicfg := &spiderpoolv2beta1.SpiderCNIConfig{} + if err := r.client.Get(ctx, ktypes.NamespacedName{Name: req.Name}, cnicfg); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + netAttachName := cnicfg.Name + if cnicfg.Annotations != nil { + if tmpName, ok := cnicfg.Annotations[constant.AnnoNetAttachConfName]; ok { + netAttachName = tmpName + } + } + + anno := make(map[string]string) + for k, v := range cnicfg.Annotations { + anno[k] = v + } + + isExist := true + netAttachDef := &netv1.NetworkAttachmentDefinition{} + err := r.client.Get(ctx, ktypes.NamespacedName{Namespace: r.targetNamespace, Name: netAttachName}, netAttachDef) + if err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, err + } + isExist = false + } + + newNetAttachDef, err := generateNetAttachDefWithSpec(netAttachName, r.targetNamespace, cnicfg.Spec, anno) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to generate net-attach-def, error: %w", err) + } + + if err := controllerutil.SetControllerReference(cnicfg, newNetAttachDef, r.scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set net-attach-def %s owner reference with SpiderCNIConfig %s, error: %w", + newNetAttachDef.Name, cnicfg.Name, err) + } + + if isExist { + if netAttachDef.DeletionTimestamp != nil { + return ctrl.Result{RequeueAfter: 2 * time.Second}, nil + } + + isNeedUpdate := false + if !reflect.DeepEqual(netAttachDef.Annotations, 
newNetAttachDef.Annotations) { + log.Debug("SpiderCNIConfig annotation changed") + netAttachDef.SetAnnotations(newNetAttachDef.Annotations) + isNeedUpdate = true + } + + if netAttachDef.Spec.Config != newNetAttachDef.Spec.Config { + log.Debug("SpiderCNIConfig CNI configuration changed") + netAttachDef.Spec.Config = newNetAttachDef.Spec.Config + isNeedUpdate = true + } + + if !metav1.IsControlledBy(netAttachDef, cnicfg) { + log.Debug("net-attach-def ownerReference was removed, try to add it") + netAttachDef.SetOwnerReferences(newNetAttachDef.GetOwnerReferences()) + isNeedUpdate = true + } + + if isNeedUpdate { + log.Info("try to update net-attach-def", zap.String("nad", fmt.Sprintf("%s/%s", netAttachDef.Namespace, netAttachDef.Name))) + if err := r.client.Update(ctx, netAttachDef); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update net-attach-def %s/%s, error: %w", netAttachDef.Namespace, netAttachDef.Name, err) + } + } + + return ctrl.Result{}, nil + } + + log.Info("try to create net-attach-def", zap.String("nad", fmt.Sprintf("%s/%s", newNetAttachDef.Namespace, newNetAttachDef.Name))) + if err := r.client.Create(ctx, newNetAttachDef); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create net-attach-def %s/%s, error: %w", newNetAttachDef.Namespace, newNetAttachDef.Name, err) + } + + return ctrl.Result{}, nil +} diff --git a/pkg/networking/networking/route.go b/pkg/networking/networking/route.go index 19a0b0c5f8..9d8bb4fb18 100644 --- a/pkg/networking/networking/route.go +++ b/pkg/networking/networking/route.go @@ -78,7 +78,7 @@ func DelToRuleTable(dst *net.IPNet, ruleTable int) error { func AddRuleTableWithMark(mark, ruleTable, ipFamily int) error { rule := netlink.NewRule() - rule.Mark = mark + rule.Mark = uint32(mark) rule.Table = ruleTable rule.Family = ipFamily rule.Priority = defaultRulePriority diff --git a/pkg/networking/networking/sys.go b/pkg/networking/networking/sys.go index 6941528024..3700aec798 100644 --- 
a/pkg/networking/networking/sys.go +++ b/pkg/networking/networking/sys.go @@ -1,18 +1,28 @@ -// Copyright 2023 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 - package networking import ( + "bytes" "fmt" "os" "path" "path/filepath" + "strconv" + "strings" +) + +var ( + SysClassNetDevicePath = "/sys/class/net" + SysVirtualNetDevicePath = "/sys/devices/virtual/net" + SysBusPciDevicesPath = "/sys/bus/pci/devices" + SysDevicePciPath = "/sys/devices/pci" ) -const ( - SysClassNetDevicePath = "/sys/class/net" - SysBusPciDevicesPath = "/sys/bus/pci/devices" +// PCI device class: https://admin.pci-ids.ucw.cz/read/PD/ +var ( + ETH_DEVICE_CLASS = "0x020000" + INFINIBAND_DEVICE_CLASS = "0x020700" + GPU_DEVICE_CLASS = "0x030200" + GPU1_DEVICE_CLASS = "0x038000" ) func GetPfNameFromVfDeviceId(vfDeviceID string) (string, error) { @@ -24,6 +34,21 @@ func GetPfNameFromVfDeviceId(vfDeviceID string) (string, error) { return GetPfNameFromPfDeviceID(pfDeviceID) } +func GetPfNameFromPfDeviceID(pfDeviceID string) (string, error) { + // Get the network interface name from PCI address + pfNetDir := path.Join(SysBusPciDevicesPath, pfDeviceID, "net") + dirs, err := os.ReadDir(pfNetDir) + if err != nil { + return "", fmt.Errorf("failed to read net directory for pf %s: %v", pfDeviceID, err) + } + + if len(dirs) == 0 { + return "", fmt.Errorf("no network interface found for pf %s", pfDeviceID) + } + + return dirs[0].Name(), nil +} + func GetPfDeviceIDFromVF(vfDeviceID string) (string, error) { // First try the traditional approach via sysfs (works in host namespace) vf_physfn := path.Join(SysBusPciDevicesPath, vfDeviceID, "physfn") @@ -45,17 +70,410 @@ func GetPfDeviceIDFromVF(vfDeviceID string) (string, error) { return filepath.Base(physfnPath), nil } -func GetPfNameFromPfDeviceID(pfDeviceID string) (string, error) { +// IsVirtualInterface checks if the interface is virtual or not by +// checking if the /sys/devices/virtual/net/{ifName} exists +func IsVirtualNetDevice(ifName 
string) (bool, error) { + devicePath := path.Join(SysVirtualNetDevicePath, ifName) + _, err := os.Lstat(devicePath) + if err == nil { + return true, nil + } + + // if !os.IsNotExist(err) { + // return false, err + // } + return false, nil +} + +func GetPciAddessForNetDev(ifName string) (string, error) { + // get pci info from sysfs + pciPath := fmt.Sprintf("%s/%s/device", SysClassNetDevicePath, ifName) + if _, err := os.Lstat(pciPath); err != nil { + return "", err + } + + // get pci address + pciAddr, err := os.Readlink(pciPath) + if err != nil { + return "", err + } + + return filepath.Base(pciAddr), nil +} + +func GetNetNameFromPciAddress(pciAddress string) (string, error) { + // Validate PCI address format + if pciAddress == "" { + return "", fmt.Errorf("empty PCI address") + } + + // Check if the PCI device exists + pciDevPath := path.Join(SysBusPciDevicesPath, pciAddress) + if _, err := os.Stat(pciDevPath); err != nil { + return "", fmt.Errorf("PCI device %s not found: %v", pciAddress, err) + } + // Get the network interface name from PCI address - pfNetDir := path.Join(SysBusPciDevicesPath, pfDeviceID, "net") + netDir := path.Join(pciDevPath, "net") + dirs, err := os.ReadDir(netDir) + if err != nil { + return "", fmt.Errorf("failed to read net directory for PCI device %s: %v", pciAddress, err) + } + + if len(dirs) == 0 { + return "", fmt.Errorf("no network interface found for PCI device %s", pciAddress) + } + + // Return the first network interface name + return dirs[0].Name(), nil +} + +func GetPciDeviceIdForNetDev(ifName string) (string, error) { + datas, err := GetSysDeviceConfigForNetDev(ifName, "device") + if err != nil { + return "", err + } + + return datas, nil +} + +func GetPciVendorForNetDev(ifName string) (string, error) { + datas, err := GetSysDeviceConfigForNetDev(ifName, "vendor") + if err != nil { + return "", err + } + + return datas, nil +} + +// GetSriovTotalVfsForNetDev get sriov vf count from sysfs +func GetSriovTotalVfsForNetDev(ifName 
string) (int, error) { + totalvfsBytes, err := GetSysDeviceConfigForNetDev(ifName, "sriov_totalvfs") + if err != nil { + return 0, err + } + + vfs, err := strconv.Atoi(totalvfsBytes) + if err != nil { + return 0, err + } + return vfs, nil +} + +func SriovTotalVfsFromPciBus(pciAddress string) int { + total, err := os.ReadFile(SysBusPciDevicesPath + "/" + pciAddress + "/" + "sriov_totalvfs") + if err != nil { + return 0 + } + + total = bytes.TrimSpace(total) + t, err := strconv.Atoi(string(total)) + if err != nil { + return 0 + } + return t +} + +func IsSriovPfForNetDev(iface string) (bool, error) { + _, err := GetSysDeviceConfigForNetDev(iface, "sriov_totalvfs") + if err == nil { + return true, nil + } + + if os.IsNotExist(err) { + return false, nil + } + + return false, err +} + +// IsSriovVfForNetDev checks if the netdev is sriov vf or not by checking if +// the /sys/class/net/{ifName}/device/physfn exists +func IsSriovVfForNetDev(iface string) bool { + vf_physfn := path.Join(SysClassNetDevicePath, iface, "device", "physfn") + _, err := os.Lstat(vf_physfn) + if err != nil { + return false + } + + return true +} + +func GetPfFromVF(vfName string) (string, error) { + vf_physfn := path.Join(SysClassNetDevicePath, vfName, "device", "physfn") + // Check if the physfn symlink exists + physfnInfo, err := os.Lstat(vf_physfn) + if err != nil { + return "", fmt.Errorf("failed to get physfn info for vf %s: %v", vfName, err) + } + + if physfnInfo.Mode()&os.ModeSymlink == 0 { + return "", fmt.Errorf("physfn %s is not a symlink", vf_physfn) + } + + // Read the path that the symlink points to + physfnPath, err := os.Readlink(vf_physfn) + if err != nil { + return "", fmt.Errorf("failed to read physfn symlink for vf %s: %v", vfName, err) + } + + // Get the PF's PCI address (last path component) + pfPciAddr := filepath.Base(physfnPath) + + // Get the network interface name from PCI address + pfNetDir := path.Join(SysBusPciDevicesPath, pfPciAddr, "net") dirs, err := 
os.ReadDir(pfNetDir) if err != nil { - return "", fmt.Errorf("failed to read net directory for pf %s: %v", pfDeviceID, err) + return "", fmt.Errorf("failed to read net directory for pf %s: %v", pfPciAddr, err) } if len(dirs) == 0 { - return "", fmt.Errorf("no network interface found for pf %s", pfDeviceID) + return "", fmt.Errorf("no network interface found for pf %s", pfPciAddr) } return dirs[0].Name(), nil } + +func IsSriovVfFromPciAddress(pciAddress string) (bool, error) { + _, err := os.Stat(SysBusPciDevicesPath + "/" + pciAddress + "/" + "physfn") + if err == nil { + return true, nil + } + + return false, err +} + +func GetSysDeviceConfigForNetDev(iface, attribute string) (string, error) { + path := fmt.Sprintf("%s/%s/device/%s", SysClassNetDevicePath, iface, attribute) + if _, err := os.Lstat(path); err != nil { + return "", err + } + + // read attribute + attributeBytes, err := os.ReadFile(path) + if err != nil { + return "", err + } + return string(bytes.TrimSpace(attributeBytes)), nil +} + +func GetSysDeviceConfigForPciDev(dev, attribute string) (string, error) { + path := fmt.Sprintf("%s/%s/%s", SysBusPciDevicesPath, dev, attribute) + if _, err := os.Lstat(path); err != nil { + return "", err + } + + // read attribute + attributeBytes, err := os.ReadFile(path) + if err != nil { + return "", err + } + return string(bytes.TrimSpace(attributeBytes)), nil +} + +// GetSriovAvailableVfPciAddressesForNetDev returns the list of available VF PCI addresses for +// the given network device. 
+func GetSriovAvailableVfPciAddressesForNetDev(ifName string) ([]string, error) { + // get total VFs + totalVfs, err := GetSriovTotalVfsForNetDev(ifName) + if err != nil { + return nil, fmt.Errorf("failed to get total VFs for interface %s: %v", ifName, err) + } + + pciAddress, err := GetPciAddessForNetDev(ifName) + if err != nil { + return nil, fmt.Errorf("failed to get PCI address for interface %s: %v", ifName, err) + } + + availableVfPciAddresses := []string{} + for i := 0; i < totalVfs; i++ { + vfDir := fmt.Sprintf("virtfn%d", i) + vfPath := path.Join(SysBusPciDevicesPath, pciAddress, vfDir) + + // check if VF directory exists + if _, err := os.Stat(vfPath); os.IsNotExist(err) { + continue + } + + // get VF PCI address + vfPciAddrPath, err := os.Readlink(vfPath) + if err != nil { + continue + } + vfPciAddr := filepath.Base(vfPciAddrPath) + + // check if net directory exists + vfNetDir := path.Join(vfPath, "net") + + // if net directory does not exist, VF may be unavailable + if _, err := os.Stat(vfNetDir); os.IsNotExist(err) { + continue + } + + files, err := os.ReadDir(vfNetDir) + if err != nil { + continue + } + + // if the net directory is empty, VF is assigned to a net namespace + if len(files) == 0 { + continue + } + availableVfPciAddresses = append(availableVfPciAddresses, vfPciAddr) + } + + return availableVfPciAddresses, nil +} + +// GetVFList returns a List containing PCI addr for all VF discovered in a given PF +func GetVFList(pfPciAddr string) (vfList []string, err error) { + vfList = make([]string, 0) + pfDir := path.Join(SysBusPciDevicesPath, pfPciAddr) + _, err = os.Stat(pfDir) + if err != nil { + err = fmt.Errorf("could not get PF directory information for device: %s, Err: %v", pfDir, err) + return + } + + vfDirs, err := filepath.Glob(filepath.Join(pfDir, "virtfn*")) + if err != nil { + err = fmt.Errorf("error reading VF directories %v", err) + return + } + + // Read all VF directory and get add VF PCI addr to the vfList + for _, dir := range 
vfDirs { + dirInfo, err := os.Lstat(dir) + if err == nil && (dirInfo.Mode()&os.ModeSymlink != 0) { + linkName, err := filepath.EvalSymlinks(dir) + if err == nil { + vfLink := filepath.Base(linkName) + vfList = append(vfList, vfLink) + } + } + } + return +} + +// GetNetdevBandwidth retrieves the bandwidth of a network device in Mbps. +// Returns speed in Mbps and a bool indicating if the device supports duplex mode. +func GetNetdevBandwidth(ifName string) (int, error) { + // Read speed from sysfs + speedPath := fmt.Sprintf("%s/%s/speed", SysClassNetDevicePath, ifName) + if _, err := os.Stat(speedPath); err != nil { + return 0, fmt.Errorf("failed to stat speed path for %s: %v", ifName, err) + } + + speedBytes, err := os.ReadFile(speedPath) + if err != nil { + return 0, fmt.Errorf("failed to read speed for network device %s: %v", ifName, err) + } + + // Convert speed from string to int + speedStr := string(bytes.TrimSpace(speedBytes)) + speed, err := strconv.Atoi(speedStr) + if err != nil { + return 0, fmt.Errorf("failed to parse speed value '%s' for network device %s: %v", speedStr, ifName, err) + } + + return speed, nil +} + +// getPciPathFromReadLink returns the PCI path from the readlink of the PCI bus path +// e.g. 
0000:81:00.0 -> ../../../devices/pci0000:80/0000:80:00.0/0000:81:00.0 +// return pci0000:80/0000:80:00.0/0000:81:00.0 +func getPciPathFromReadLink(pciBusPath string) (string, error) { + // found the gpu, then we check the pci affinity with the net device + pciDevicePath, err := filepath.EvalSymlinks(pciBusPath) + if err != nil { + return "", err + } + + return strings.TrimPrefix(pciDevicePath, "/sys/devices/"), nil +} + +// GetGdrGpusForNetDevice returns the list of GPUs that are connected to the same host bridge as the network device +func GetGdrGpusForNetDevice(ifName string) (gdrGpus []string, err error) { + // Get PCI address for the network device + netDevicePciAddress := fmt.Sprintf("%s/%s/device", SysClassNetDevicePath, ifName) + if _, err := os.Lstat(netDevicePciAddress); err != nil { + return nil, err + } + + // get full pci address, e.g. 0000:81:00.0 -> pci0000:00/0000:00:02.0/0000:02:00.0/0000:03:08.0/0000:05:00.0 + netDeviceFullPciAddress, err := getPciPathFromReadLink(netDevicePciAddress) + if err != nil { + return nil, fmt.Errorf("failed to get full PCI address for network device %s: %v", ifName, err) + } + + // get numa node + netDeviceNumaNode, err := GetSysDeviceConfigForNetDev(ifName, "numa_node") + if err != nil { + return nil, fmt.Errorf("failed to get NUMA node for network device %s: %v", ifName, err) + } + + netDeviceFullPciAddressSlice := strings.Split(netDeviceFullPciAddress, "/") + + pciAddress, err := os.ReadDir(SysBusPciDevicesPath) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", SysBusPciDevicesPath, err) + } + + for _, dir := range pciAddress { + gpuPciBusPath := filepath.Join(SysBusPciDevicesPath, dir.Name()) + classBytes, err := os.ReadFile(gpuPciBusPath + "/class") + if err != nil { + continue + } + + classStr := string(bytes.TrimSpace(classBytes)) + if (classStr != GPU_DEVICE_CLASS) && (classStr != GPU1_DEVICE_CLASS) { + continue + } + + // get numa node + gpuNumaNode, err := 
GetSysDeviceConfigForPciDev(dir.Name(), "numa_node") + if err != nil { + continue + } + + if gpuNumaNode != netDeviceNumaNode { + // GPU and network device are not in the same NUMA node, it's SYS topology + continue + } + + // found a gpu, then we check the pci affinity with the net device + // like pci0000:ce/0000:ce:01.0/0000:cf:00.0/0000:d0:01.0/0000:d2:00.0 + gpuFullPciPath, err := getPciPathFromReadLink(gpuPciBusPath) + if err != nil { + continue + } + gpuFullPciPathSlice := strings.Split(gpuFullPciPath, "/") + isGdrEnabled := comparePciAffinity(netDeviceFullPciAddressSlice, gpuFullPciPathSlice) + if isGdrEnabled { + gdrGpus = append(gdrGpus, dir.Name()) + } + } + + return +} + +// comparePciAffinity checks the PCI affinity between the network device and the GPU. +// isPIX: Connection traversing at most a single PCIe bridge +// isPXB: Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) +func comparePciAffinity(nicPciBusSlices, gpuPciBusSlices []string) (isGdr bool) { + // corner case 1 + // if the two pci devices are directly connected to the same host bridge + // or cross only one pcie bridge, which we consider to be a PIX topology. 
+ // pci1: 0000:80:00.0/81:00.0 + // pci2: 0000:80:00.0/81:00.1 + if len(nicPciBusSlices) < 2 || len(gpuPciBusSlices) < 2 { + return false + } + + // nic and gpu are not in the same host bridge + if nicPciBusSlices[0] != gpuPciBusSlices[0] { + return false + } + return true +} diff --git a/pkg/podmanager/pod_webhook.go b/pkg/podmanager/pod_webhook.go index 93e63e8183..048bb7d3c9 100644 --- a/pkg/podmanager/pod_webhook.go +++ b/pkg/podmanager/pod_webhook.go @@ -13,6 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -32,6 +33,7 @@ type PodWebhook interface { type PWebhook struct { spiderClient crdclientset.Interface + client client.Client } // InitPodWebhook initializes the pod webhook. @@ -48,6 +50,7 @@ func InitPodWebhook(mgr ctrl.Manager) error { pw := &PWebhook{ spiderClient: spiderClient, + client: mgr.GetClient(), } // setup mutating webhook for pods @@ -74,6 +77,18 @@ func (pw *PWebhook) Default(ctx context.Context, obj runtime.Object) error { zap.String("Pod", pod.GenerateName)) mutateLogger.Sugar().Debugf("Request Pod: %+v", *pod) + // first to check if the pod has resource claims + if len(pod.Spec.ResourceClaims) > 0 { + mutateLogger.Sugar().Infof("Start to inject dra resources to pod %s/%s", pod.Namespace, pod.GenerateName) + err := InjectPodNetworkFromResourceClaim(pw.client, pod) + if err != nil { + mutateLogger.Sugar().Errorf("Failed to injected dra resources to pod %s/%s: %v", pod.Namespace, pod.GenerateName, err) + return err + } + mutateLogger.Sugar().Debugf("Success to injected dra resources to pod %s/%s", pod.Namespace, pod.GenerateName) + return nil + } + needInject := false for _, anno := range []string{constant.AnnoPodResourceInject, constant.AnnoNetworkResourceInject} { if _, ok := pod.Annotations[anno]; ok { @@ -86,7 +101,7 @@ func (pw *PWebhook) 
Default(ctx context.Context, obj runtime.Object) error { return nil } - err := podNetworkMutatingWebhook(pw.spiderClient, pod) + err := podNetworkMutatingWebhook(pw.spiderClient, pw.client, pod) if err != nil { mutateLogger.Sugar().Errorf("Failed to inject network resources for pod %s/%s: %v", pod.Namespace, pod.GenerateName, err) return err diff --git a/pkg/podmanager/utils.go b/pkg/podmanager/utils.go index c0a3727113..4710ca366e 100644 --- a/pkg/podmanager/utils.go +++ b/pkg/podmanager/utils.go @@ -5,18 +5,23 @@ package podmanager import ( "context" + "encoding/json" "fmt" v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" crdclientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + resourcev1 "k8s.io/api/resource/v1" k8s_resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" kubevirtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/spidernet-io/spiderpool/pkg/constant" "github.com/spidernet-io/spiderpool/pkg/multuscniconfig" + spidertypes "github.com/spidernet-io/spiderpool/pkg/types" ) func IsPodAlive(pod *corev1.Pod) bool { @@ -71,7 +76,8 @@ func IsStaticIPPod(enableStatefulSet, enableKubevirtStaticIP bool, pod *corev1.P // // Returns: // - An error if any step in the process fails, nil otherwise -func podNetworkMutatingWebhook(spiderClient crdclientset.Interface, pod *corev1.Pod) error { +func podNetworkMutatingWebhook(spiderClient crdclientset.Interface, client client.Client, pod *corev1.Pod) error { + var multusConfigs *v2beta1.SpiderMultusConfigList for _, anno := range []string{constant.AnnoPodResourceInject, constant.AnnoNetworkResourceInject} { multusLabelValue, ok := pod.Annotations[anno] if !ok { @@ -92,7 +98,7 @@ func podNetworkMutatingWebhook(spiderClient crdclientset.Interface, pod *corev1. 
return fmt.Errorf("failed to create label selector: %v", err) } - multusConfigs, err := spiderClient.SpiderpoolV2beta1().SpiderMultusConfigs("").List(context.TODO(), metav1.ListOptions{ + multusConfigs, err = spiderClient.SpiderpoolV2beta1().SpiderMultusConfigs("").List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) if err != nil { @@ -104,8 +110,8 @@ func podNetworkMutatingWebhook(spiderClient crdclientset.Interface, pod *corev1. } return InjectPodNetwork(pod, *multusConfigs) - } + } return nil } @@ -202,3 +208,109 @@ func DoValidateRdmaResouce(mc v2beta1.SpiderMultusConfig) error { return fmt.Errorf("RDMA resource injection does not support cniType: %s", *spec.CniType) } } + +// InjectPodNetworkFromResourceClaim injects network configurations into the pod based on the provided ResourceClaim. +// Note: we expect the ResourceClaim or ResourceClaimTemplate has been created when the pod mutating webhook. Or we +// may hit the "not found" error. +func InjectPodNetworkFromResourceClaim(client client.Client, pod *corev1.Pod) error { + var multusConfigName []string + var parameter spidertypes.ParameterConfig + getStaticNics := func(spec resourcev1.ResourceClaimSpec) error { + for _, req := range spec.Devices.Requests { + // only care our device class + if req.Exactly.DeviceClassName == constant.DRACNIDeviceClass { + multusConfigName = append(multusConfigName, req.Name) + } + } + + if len(multusConfigName) > 0 { + for _, config := range spec.Devices.Config { + if config.DeviceConfiguration.Opaque.Driver != constant.DRADriverName { + continue + } + if err := json.Unmarshal(config.DeviceConfiguration.Opaque.Parameters.Raw, ¶meter); err != nil { + return err + } + break + } + } + return nil + } + + for _, resourceClaim := range pod.Spec.ResourceClaims { + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must be set. 
+ if resourceClaim.ResourceClaimTemplateName != nil && *resourceClaim.ResourceClaimTemplateName != "" { + rct := resourcev1.ResourceClaimTemplate{} + if err := client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: *resourceClaim.ResourceClaimTemplateName}, &rct); err != nil { + return err + } + + if err := getStaticNics(rct.Spec.Spec); err != nil { + return err + } + + if len(multusConfigName) > 0 { + break + } + } + + if resourceClaim.ResourceClaimName != nil && *resourceClaim.ResourceClaimName != "" { + rct := resourcev1.ResourceClaim{} + if err := client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: *resourceClaim.ResourceClaimName}, &rct); err != nil { + return err + } + + if err := getStaticNics(rct.Spec); err != nil { + return err + } + + // found the multus config name + if len(multusConfigName) > 0 { + break + } + } + } + + if len(multusConfigName) == 0 { + return fmt.Errorf("No multus config found from resource claim of pod %s/%s", pod.Namespace, pod.GenerateName) + } + + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + resourcesMap := make(map[string]bool) + for idx, mc := range multusConfigName { + // Update the pod's network attachment + var smc v2beta1.SpiderMultusConfig + if err := client.Get(context.TODO(), types.NamespacedName{Namespace: parameter.MultusNamaspace, Name: mc}, &smc); err != nil { + return err + } + + smcName := smc.Name + if smc.Annotations[constant.AnnoNetAttachConfName] != "" { + smcName = smc.Annotations[constant.AnnoNetAttachConfName] + } + + resourceName := multuscniconfig.ResourceName(&smc) + if resourceName != "" { + resourcesMap[resourceName] = false + } + + if idx == 0 { + pod.Annotations[constant.MultusDefaultNetAnnot] = fmt.Sprintf("%s/%s", smc.Namespace, smcName) + continue + } + + if networks, ok := pod.Annotations[constant.MultusNetworkAttachmentAnnot]; !ok { + pod.Annotations[constant.MultusNetworkAttachmentAnnot] = fmt.Sprintf("%s/%s", 
smc.Namespace, smcName) + } else { + pod.Annotations[constant.MultusNetworkAttachmentAnnot] = networks + "," + fmt.Sprintf("%s/%s", smc.Namespace, smcName) + } + } + + if parameter.PodDefaultRouteNic != "" { + pod.Annotations[constant.AnnoDefaultRouteInterface] = parameter.PodDefaultRouteNic + } + InjectRdmaResourceToPod(resourcesMap, pod) + return nil +} diff --git a/pkg/types/k8s.go b/pkg/types/k8s.go index 735170cdfe..3fdecb367f 100644 --- a/pkg/types/k8s.go +++ b/pkg/types/k8s.go @@ -123,10 +123,24 @@ type SpiderpoolConfigmapConfig struct { EnableValidatingResourcesDeletedWebhook bool `yaml:"enableValidatingResourcesDeletedWebhook"` IpamUnixSocketPath string `yaml:"ipamUnixSocketPath"` PodResourceInjectConfig PodResourceInjectConfig `yaml:"podResourceInject"` + DRAConfig DRAConfig `yaml:"dra"` } type PodResourceInjectConfig struct { Enabled bool `yaml:"enabled"` + EnabledDRAWebhook bool `yaml:"enabledDRAWebhook"` NamespacesExclude []string `yaml:"namespacesExclude"` NamespacesInclude []string `yaml:"namespacesInclude"` } + +type DRAConfig struct { + Enabled bool `yaml:"enabled"` + CdiDir string `yaml:"cdiDir"` + EnableNRI bool `yaml:"enabledNRI"` +} + +// ParameterConfig is the parameter config of the ResourceClaim opaque config +type ParameterConfig struct { + PodDefaultRouteNic string `json:"podDefaultRouteNic"` + MultusNamaspace string `json:"multusNamespace"` +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index d60e459e99..314f0cc811 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -6,11 +6,13 @@ package utils import ( "fmt" "net" + "os" "regexp" "sort" "strings" "github.com/containernetworking/cni/libcni" + "github.com/spidernet-io/spiderpool/pkg/constant" corev1 "k8s.io/api/core/v1" ) @@ -168,3 +170,23 @@ func ExtractK8sCIDRFromKCMPod(kcm *corev1.Pod) ([]string, []string) { return podCIDR, serviceCIDR } + +func AbsInt(a, b int) int { + if a > b { + return a - b + } + + return b - a +} + +// GetNodeName returns the current node name 
+func GetNodeName() string { + return os.Getenv(constant.ENV_SPIDERPOOL_NODENAME) +} + +func GetAgentNamespace() string { + if os.Getenv(constant.ENV_SPIDERPOOL_AGENT_NAMESPACE) == "" { + return constant.Spiderpool + } + return os.Getenv(constant.ENV_SPIDERPOOL_AGENT_NAMESPACE) +} diff --git a/test/e2e/common/deployment.go b/test/e2e/common/deployment.go index d7f6d2f813..5a76d38b0a 100644 --- a/test/e2e/common/deployment.go +++ b/test/e2e/common/deployment.go @@ -111,10 +111,8 @@ func GenerateDraDeploymentYaml(dpmName, claim, namespace string, replica int32) }, ResourceClaims: []corev1.PodResourceClaim{ { - Name: claim, - Source: corev1.ClaimSource{ - ResourceClaimTemplateName: ptr.To(claim), - }, + Name: claim, + ResourceClaimTemplateName: ptr.To(claim), }, }, }, diff --git a/test/e2e/spidercoordinator/spidercoordinator_suite_test.go b/test/e2e/spidercoordinator/spidercoordinator_suite_test.go index 2a069c7bcf..750238fc14 100644 --- a/test/e2e/spidercoordinator/spidercoordinator_suite_test.go +++ b/test/e2e/spidercoordinator/spidercoordinator_suite_test.go @@ -9,7 +9,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" e2e "github.com/spidernet-io/e2eframework/framework" - networkingv1 "k8s.io/api/networking/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" diff --git a/test/e2e/spidercoordinator/spidercoordinator_test.go b/test/e2e/spidercoordinator/spidercoordinator_test.go index ab892c7956..03203434ab 100644 --- a/test/e2e/spidercoordinator/spidercoordinator_test.go +++ b/test/e2e/spidercoordinator/spidercoordinator_test.go @@ -11,7 +11,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/tools/scripts/ansible/RDMA_BW_TEST_README.md b/tools/scripts/ansible/RDMA_BW_TEST_README.md new file mode 100644 index 0000000000..db965edd2d --- /dev/null +++ b/tools/scripts/ansible/RDMA_BW_TEST_README.md @@ -0,0 +1,79 @@ +RDMA 带宽测试结果汇总 +测试时间: Tue Dec 2 12:58:15 PM UTC 2025 +本地节点: gpu-266 +目标节点: 对端节点 (8 个 IP) + +源RDMA设备 源网卡 源IP 目的IP 测试类型 结果 +---------------------------------------------------------------------------------------------------- +mlx5_0 ib0 10.10.1.11 10.10.1.12 同轨 成功 (370.362850 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.17.12 跨轨 成功 (368.541393 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.33.12 跨轨 成功 (367.220538 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.49.12 跨轨 成功 (369.844259 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.65.12 跨轨 成功 (370.738409 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.81.12 跨轨 成功 (370.029054 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.97.12 跨轨 成功 (373.638663 GB/s) +mlx5_0 ib0 10.10.1.11 10.10.113.12 跨轨 成功 (371.401054 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.1.12 跨轨 成功 (373.616510 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.17.12 同轨 成功 (373.302591 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.33.12 跨轨 成功 (370.783050 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.49.12 跨轨 成功 (373.343670 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.65.12 跨轨 成功 (366.984091 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.81.12 跨轨 成功 (368.577478 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.97.12 跨轨 成功 (369.998853 GB/s) +mlx5_1 ib1 10.10.17.11 10.10.113.12 跨轨 成功 (373.941984 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.1.12 跨轨 成功 (369.227909 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.17.12 跨轨 成功 (369.150625 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.33.12 同轨 成功 (373.014241 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.49.12 跨轨 成功 (372.086704 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.65.12 跨轨 成功 (366.880731 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.81.12 跨轨 成功 (364.209165 GB/s) +mlx5_2 ib2 
10.10.33.11 10.10.97.12 跨轨 成功 (365.108925 GB/s) +mlx5_2 ib2 10.10.33.11 10.10.113.12 跨轨 成功 (371.626681 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.1.12 跨轨 成功 (383.335331 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.17.12 跨轨 成功 (373.907246 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.33.12 跨轨 成功 (374.728851 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.49.12 同轨 成功 (369.383928 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.65.12 跨轨 成功 (373.152394 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.81.12 跨轨 成功 (386.494684 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.97.12 跨轨 成功 (372.593950 GB/s) +mlx5_5 ib3 10.10.49.11 10.10.113.12 跨轨 成功 (377.608735 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.1.12 跨轨 成功 (362.640274 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.17.12 跨轨 成功 (357.128966 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.33.12 跨轨 成功 (362.986513 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.49.12 跨轨 成功 (365.124245 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.65.12 同轨 成功 (358.425353 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.81.12 跨轨 成功 (361.724690 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.97.12 跨轨 成功 (365.448083 GB/s) +mlx5_6 ib4 10.10.65.11 10.10.113.12 跨轨 成功 (365.164058 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.1.12 跨轨 成功 (354.661368 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.17.12 跨轨 成功 (358.425177 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.33.12 跨轨 成功 (369.738914 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.49.12 跨轨 成功 (364.709032 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.65.12 跨轨 成功 (362.111590 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.81.12 同轨 成功 (363.103021 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.97.12 跨轨 成功 (362.729822 GB/s) +mlx5_7 ib5 10.10.81.11 10.10.113.12 跨轨 成功 (363.336060 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.1.12 跨轨 成功 (368.847419 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.17.12 跨轨 成功 (368.204458 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.33.12 跨轨 成功 (355.489284 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.49.12 跨轨 成功 (366.832518 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.65.12 跨轨 成功 (363.954554 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.81.12 跨轨 成功 (364.474394 GB/s) +mlx5_8 ib6 10.10.97.11 10.10.97.12 同轨 成功 (368.075394 GB/s) +mlx5_8 ib6 10.10.97.11 
10.10.113.12 跨轨 成功 (360.354528 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.1.12 跨轨 成功 (365.105345 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.17.12 跨轨 成功 (375.062456 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.33.12 跨轨 成功 (357.372232 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.49.12 跨轨 成功 (349.866940 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.65.12 跨轨 成功 (372.800785 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.81.12 跨轨 成功 (366.365314 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.97.12 跨轨 成功 (362.818622 GB/s) +mlx5_9 ib7 10.10.113.11 10.10.113.12 同轨 成功 (371.430718 GB/s) + +======================================= +测试统计 +======================================= +总测试数: 64 +成功: 64 +失败: 0 +成功率: 100.00% \ No newline at end of file diff --git a/tools/scripts/ansible/config_ib_ips.sh b/tools/scripts/ansible/config_ib_ips.sh new file mode 100644 index 0000000000..ef53b621e0 --- /dev/null +++ b/tools/scripts/ansible/config_ib_ips.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# 脚本名称: config_ib_ips.sh +# 功能: 批量配置 ib0-ib7 网卡的 IP 地址 +# 用法: ./config_ib_ips.sh ... + +# 默认配置 +DEFAULT_NETMASK=20 +START_IB_INDEX=0 + +# 帮助函数 +usage() { + echo "用法: $0 ... " + echo "示例: $0 10.10.1.9 10.10.17.9 ... 10.10.113.9" + echo "" + echo "环境变量 (可选):" + echo " NETMASK: 子网掩码前缀长度 (默认: $DEFAULT_NETMASK)" + echo " START_DEV: 起始网卡编号 (默认: $START_IB_INDEX)" + exit 1 +} + +# 获取掩码 +NETMASK=${NETMASK:-$DEFAULT_NETMASK} +START_IDX=${START_DEV:-$START_IB_INDEX} + +# 检查参数数量 +if [ "$#" -ne 8 ]; then + echo "错误: 需要输入 8 个 IP 地址,实际输入了 $# 个" + usage +fi + +# 将输入参数转为数组 +IPS=("$@") +COUNT=${#IPS[@]} + +echo "=== 开始配置 IB 网卡 (掩码: /$NETMASK) ===" + +# 循环配置每个网卡 +for (( i=0; i $IP ... " + + # 1. 检查网卡是否存在 + if ! ip link show "$DEV" >/dev/null 2>&1; then + echo "[跳过] 网卡 $DEV 不存在" + continue + fi + + # 2. 启动网卡 + if ! ip link set "$DEV" up; then + echo "[失败] 无法启动网卡" + continue + fi + + # 3. 清除旧 IP (避免累积多个 IP) + ip addr flush dev "$DEV" + + # 4. 
配置新 IP + if ip addr add "$IP/$NETMASK" dev "$DEV"; then + echo "[成功]" + else + echo "[失败] 无法设置 IP" + fi +done + +echo "" +echo "=== 配置结果概览 ===" +ip -br addr show | grep "^ib" | awk '{printf "%-8s %-30s %s\n", $1, $3, $2}' diff --git a/tools/scripts/ansible/deploy_rdma_bw_test.sh b/tools/scripts/ansible/deploy_rdma_bw_test.sh new file mode 100644 index 0000000000..56df140fee --- /dev/null +++ b/tools/scripts/ansible/deploy_rdma_bw_test.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# 快速部署 RDMA 带宽测试脚本到目标节点 + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# 目标节点 +NODE1="10.100.8.11" # GPU 266 +NODE2="10.100.8.12" # GPU 267 + +echo "=== 部署 RDMA 带宽测试脚本 ===" +echo "" + +# 检查脚本是否存在 +if [ ! -f "$SCRIPT_DIR/rdma_bw_test_server.sh" ] || [ ! -f "$SCRIPT_DIR/rdma_bw_test_client.sh" ]; then + echo "错误: 找不到测试脚本" + exit 1 +fi + +echo "部署到节点:" +echo " - $NODE1 (GPU 266)" +echo " - $NODE2 (GPU 267)" +echo "" + +# 部署到节点 1 +echo "正在部署到 $NODE1 ..." +scp "$SCRIPT_DIR/rdma_bw_test_server.sh" "$SCRIPT_DIR/rdma_bw_test_client.sh" "root@$NODE1:/root/" +ssh "root@$NODE1" "chmod +x /root/rdma_bw_test_*.sh" +echo "✅ $NODE1 部署完成" + +# 部署到节点 2 +echo "正在部署到 $NODE2 ..." +scp "$SCRIPT_DIR/rdma_bw_test_server.sh" "$SCRIPT_DIR/rdma_bw_test_client.sh" "root@$NODE2:/root/" +ssh "root@$NODE2" "chmod +x /root/rdma_bw_test_*.sh" +echo "✅ $NODE2 部署完成" + +echo "" +echo "=== 部署完成 ===" +echo "" +echo "开始测试:" +echo "" +echo "场景 1: GPU 266 -> GPU 267" +echo " 在 $NODE2 上: ./rdma_bw_test_server.sh" +echo " 在 $NODE1 上: ./rdma_bw_test_client.sh 12" +echo "" +echo "场景 2: GPU 267 -> GPU 266" +echo " 在 $NODE1 上: ./rdma_bw_test_server.sh" +echo " 在 $NODE2 上: ./rdma_bw_test_client.sh 11" diff --git a/tools/scripts/ansible/ib_test.yml b/tools/scripts/ansible/ib_test.yml new file mode 100644 index 0000000000..0070ecfa73 --- /dev/null +++ b/tools/scripts/ansible/ib_test.yml @@ -0,0 +1,468 @@ +--- +# IB Network Comprehensive Test Playbook +# +# Node Pairing: (0,1), (2,3), ... 
Even=client, Odd=server +# For odd nodes: node[0] also tests with node[-1] +# +# Usage: +# ansible-playbook -i inventory.ini ib_test.yml +# ansible-playbook -i inventory.ini ib_test.yml -e "skip_ping=true" +# ansible-playbook -i inventory.ini ib_test.yml -e "skip_ib_write=true" + +- name: IB Network Test + hosts: all + become: yes + remote_user: toor + gather_facts: yes + vars: + output_dir: /tmp/ib_test_results + skip_ping: false + skip_ib_write: false + skip_mpi_single: false + skip_mpi_multi: false + nccl_test_dir: /home/toor/nccl-tests-2.17.6/build + gpu_per_node: 8 + + tasks: + # ==================== Setup ==================== + - name: Generate shared timestamp + set_fact: + shared_timestamp: "{{ ansible_date_time.iso8601_basic_short }}" + run_once: true + + - name: Store timestamp on all nodes + set_fact: + timestamp: "{{ hostvars[groups['all'][0]].shared_timestamp }}" + + - name: Create output directory + file: + path: "{{ output_dir }}/{{ timestamp }}" + state: directory + mode: '0755' + + - name: Create local output directory + local_action: + module: file + path: "./ib_test_results/{{ timestamp }}" + state: directory + mode: '0755' + run_once: true + + # ==================== Get Management IP ==================== + - name: Get management IP + shell: ip route get 8.8.8.8 | grep -oP 'src \K\S+' + register: mgmt_ip_result + changed_when: false + + - name: Store management IP + set_fact: + mgmt_ip: "{{ mgmt_ip_result.stdout | trim }}" + + # ==================== Detect IB Devices ==================== + - name: Get IB HCA devices + shell: | + for dev in $(ls /sys/class/infiniband/ 2>/dev/null | grep "^mlx5_" | sort); do + link_layer=$(cat /sys/class/infiniband/$dev/ports/1/link_layer 2>/dev/null) + [ "$link_layer" = "InfiniBand" ] && echo "$dev" + done + register: ib_devices_result + changed_when: false + + - name: Store IB devices + set_fact: + ib_hca_list: "{{ ib_devices_result.stdout_lines }}" + + # ==================== Get IB Interface IPs 
==================== + - name: Get IP for each IB device + shell: | + dev="{{ item }}" + netdev=$(rdma link show $dev/1 2>/dev/null | grep -oP 'netdev \K\S+' || true) + if [ -z "$netdev" ]; then + ib_path=$(readlink -f /sys/class/infiniband/$dev) + pci_path=$(dirname $(dirname $ib_path)) + netdev=$(ls "$pci_path/net" 2>/dev/null | head -1) + fi + [ -n "$netdev" ] && ip -4 addr show $netdev 2>/dev/null | grep -oP 'inet \K[0-9.]+' | head -1 || echo "NO_IP" + loop: "{{ ib_hca_list }}" + register: ib_ip_result + changed_when: false + + - name: Build IB device IP map + set_fact: + ib_device_ips: >- + {%- set ips = {} -%} + {%- for i in range(ib_hca_list | length) -%} + {%- set _ = ips.update({ib_hca_list[i]: ib_ip_result.results[i].stdout | trim}) -%} + {%- endfor -%} + {{ ips }} + + - name: Check all IB devices have IP + set_fact: + all_ib_have_ip: "{{ ib_device_ips.values() | select('ne', 'NO_IP') | list | length == ib_hca_list | length }}" + + - name: Show node info + debug: + msg: "{{ inventory_hostname }}: mgmt={{ mgmt_ip }}, IB={{ ib_hca_list | length }} devices, all_have_ip={{ all_ib_have_ip }}" + + # ==================== Build Node Pairs ==================== + - name: Build sorted node list + set_fact: + sorted_nodes: "{{ groups['all'] | sort }}" + run_once: true + + - name: Determine my index and role + set_fact: + my_idx: "{{ sorted_nodes.index(inventory_hostname) }}" + is_server: "{{ sorted_nodes.index(inventory_hostname) % 2 == 1 }}" + is_client: "{{ sorted_nodes.index(inventory_hostname) % 2 == 0 }}" + + - name: Determine my peer + set_fact: + my_peer: >- + {%- if my_idx | int % 2 == 0 and my_idx | int + 1 < sorted_nodes | length -%} + {{ sorted_nodes[my_idx | int + 1] }} + {%- elif my_idx | int % 2 == 1 -%} + {{ sorted_nodes[my_idx | int - 1] }} + {%- endif -%} + + - name: Show pairs + debug: + msg: "Pairs: {% for i in range(0, sorted_nodes | length, 2) %}{% if i + 1 < sorted_nodes | length %}({{ sorted_nodes[i] }},{{ sorted_nodes[i+1] }}) {% endif %}{% 
endfor %}" + run_once: true + + - name: Show my role + debug: + msg: "{{ inventory_hostname }}: {{ 'SERVER' if is_server | bool else 'CLIENT' }}, peer={{ my_peer | trim }}" + + # ==================== 1. Ping Test ==================== + - name: Ping Test + block: + - name: Run ping tests and generate report + shell: | + result_file="{{ output_dir }}/{{ timestamp }}/ping_report.txt" + peer_host="{{ my_peer | trim }}" + + # Create report header if not exists + if [ ! -f "$result_file" ]; then + echo "================================================================================" > "$result_file" + echo "IB 网络 Ping 测试报告" >> "$result_file" + echo "================================================================================" >> "$result_file" + echo "时间: $(date '+%Y-%m-%d %H:%M:%S')" >> "$result_file" + echo "命令: ping -c 3 -W 2 " >> "$result_file" + echo "" >> "$result_file" + printf "%-14s %-14s %-12s %-18s %-18s %-8s\n" "源节点" "目标节点" "设备" "源IP" "目标IP" "结果" >> "$result_file" + echo "--------------------------------------------------------------------------------" >> "$result_file" + fi + + {% for dev in ib_hca_list %} + src_ip="{{ ib_device_ips[dev] }}" + dst_ip="{{ hostvars[my_peer | trim].ib_device_ips[dev] }}" + if ping -c 3 -W 2 $dst_ip >/dev/null 2>&1; then + result="OK" + else + result="FAIL" + fi + printf "%-14s %-14s %-12s %-18s %-18s %-8s\n" "{{ inventory_hostname }}" "$peer_host" "{{ dev }}" "$src_ip" "$dst_ip" "$result" >> "$result_file" + {% endfor %} + ignore_errors: yes + when: not skip_ping | bool and all_ib_have_ip | bool and my_peer | trim | length > 0 + + # ==================== 2. 
IB Write BW/Lat Test ==================== + - name: IB Write Test + block: + - name: Cleanup old processes + shell: | + pids=$(pgrep -f "ib_write_bw|ib_write_lat" 2>/dev/null | grep -v $$ || true) + [ -n "$pids" ] && kill -9 $pids 2>/dev/null || true + ignore_errors: yes + + # Each client writes to its own file first + - name: "Client - Create local report file" + copy: + content: "" + dest: "{{ output_dir }}/{{ timestamp }}/ib_write_{{ inventory_hostname }}.txt" + when: is_client | bool + + # Server starts all HCA servers with different ports + - name: "Server - Start ib_write_bw servers" + shell: | + {% for i in range(ib_hca_list | length) %} + nohup timeout 120 ib_write_bw -d {{ ib_hca_list[i] }} -F --report_gbits -p {{ 18515 + i }} > /tmp/ib_bw_server_{{ ib_hca_list[i] }}.log 2>&1 & + {% endfor %} + sleep 3 + when: is_server | bool + + - name: Wait for servers + pause: + seconds: 5 + run_once: true + + # Client runs BW tests + - name: "Client - Run ib_write_bw tests" + shell: | + server_ip="{{ hostvars[my_peer | trim].mgmt_ip }}" + server_host="{{ my_peer | trim }}" + result_file="{{ output_dir }}/{{ timestamp }}/ib_write_{{ inventory_hostname }}.txt" + + {% for i in range(ib_hca_list | length) %} + dev="{{ ib_hca_list[i] }}" + port={{ 18515 + i }} + output=$(timeout 60 ib_write_bw -d $dev -F -D 5 --report_gbits -p $port $server_ip 2>&1) + if echo "$output" | grep -qE "Unable|Couldn't|Failed|error"; then + bw="FAILED" + else + # Extract BW from last data line (format: bytes iters BW_peak BW_avg) + bw=$(echo "$output" | grep -E '^\s*[0-9]+\s+[0-9]+' | tail -1 | awk '{print $4}') + [ -z "$bw" ] && bw="N/A" + fi + echo "BW {{ inventory_hostname }} $server_host $dev $dev $bw" >> "$result_file" + {% endfor %} + when: is_client | bool + ignore_errors: yes + + - name: Wait for BW tests + pause: + seconds: 10 + run_once: true + + - name: Cleanup BW processes + shell: | + pids=$(pgrep -f "ib_write_bw" 2>/dev/null | grep -v $$ || true) + [ -n "$pids" ] && kill -9 $pids 
2>/dev/null || true + ignore_errors: yes + + # Latency test - increase timeout to 300s (8 devices * 15s each + buffer) + - name: "Server - Start ib_write_lat servers" + shell: | + {% for i in range(ib_hca_list | length) %} + nohup timeout 300 ib_write_lat -d {{ ib_hca_list[i] }} -F -D 15 -p {{ 19515 + i }} > /tmp/ib_lat_server_{{ ib_hca_list[i] }}.log 2>&1 & + {% endfor %} + sleep 5 + # Verify servers are running + for dev in {{ ib_hca_list | join(' ') }}; do + if pgrep -f "ib_write_lat -d $dev" > /dev/null; then + echo "Server for $dev started" + else + echo "WARNING: Server for $dev failed to start" + fi + done + when: is_server | bool + + - name: Wait for lat servers + pause: + seconds: 10 + run_once: true + + - name: "Client - Run ib_write_lat tests" + shell: | + server_ip="{{ hostvars[my_peer | trim].mgmt_ip }}" + server_host="{{ my_peer | trim }}" + result_file="{{ output_dir }}/{{ timestamp }}/ib_write_{{ inventory_hostname }}.txt" + log_file="{{ output_dir }}/{{ timestamp }}/ib_lat_debug_{{ inventory_hostname }}.log" + + {% for i in range(ib_hca_list | length) %} + dev="{{ ib_hca_list[i] }}" + port={{ 19515 + i }} + echo "=== Testing $dev on port $port to $server_ip ===" >> "$log_file" + output=$(timeout 30 ib_write_lat -d $dev -F -D 5 --output=latency -p $port $server_ip 2>&1) + echo "$output" >> "$log_file" + lat=$(echo "$output" | tail -1) + if echo "$lat" | grep -qE "Unable|Couldn't|Failed|error|Connection|refused|^$"; then + echo "FAILED: $lat" >> "$log_file" + lat="FAILED" + fi + echo "LAT {{ inventory_hostname }} $server_host $dev $dev $lat" >> "$result_file" + {% endfor %} + when: is_client | bool + ignore_errors: yes + + - name: Final cleanup + shell: | + pids=$(pgrep -f "ib_write_bw|ib_write_lat" 2>/dev/null | grep -v $$ || true) + [ -n "$pids" ] && kill -9 $pids 2>/dev/null || true + rm -f /tmp/ib_bw_server_*.log /tmp/ib_lat_server_*.log + ignore_errors: yes + when: not skip_ib_write | bool and my_peer | trim | length > 0 + + # 
==================== 3. MPI Single-Node Test ==================== + - name: MPI Single-Node Test + block: + - name: Detect management interface + shell: ip route get 8.8.8.8 | grep -oP 'dev \K\S+' + register: mgmt_iface_result + changed_when: false + run_once: true + delegate_to: "{{ groups['all'] | first }}" + + - name: Store management interface + set_fact: + mgmt_iface: "{{ mgmt_iface_result.stdout | trim }}" + run_once: true + + - name: Check if running as root + shell: id -u + register: uid_check + changed_when: false + run_once: true + delegate_to: "{{ groups['all'] | first }}" + + - name: Set root flag + set_fact: + is_root: "{{ uid_check.stdout | trim == '0' }}" + allow_root_flag: "{{ '--allow-run-as-root' if uid_check.stdout | trim == '0' else '' }}" + run_once: true + + - name: Run all_reduce_perf single-node + shell: | + set -e + echo "=== MPI Single-Node ===" > {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + echo "Node: {{ inventory_hostname }}" >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + echo "GPUs: {{ gpu_per_node }}" >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + echo "" >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + + CMD="mpirun -np {{ gpu_per_node }} {{ allow_root_flag }} --map-by ppr:{{ gpu_per_node }}:node:pe=1 {{ nccl_test_dir }}/all_reduce_perf -b 1K -e 256M -f 2 -g 1" + echo "--- all_reduce_perf ---" >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + echo "Command: $CMD" >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt + echo "Running: $CMD" + + $CMD >> {{ output_dir }}/{{ timestamp }}/mpi_single_test.txt 2>&1 + run_once: true + delegate_to: "{{ groups['all'] | first }}" + become: no + when: not skip_mpi_single | bool + + # ==================== 4. 
MPI Multi-Node Test ==================== + - name: MPI Multi-Node Test + block: + - name: Detect management interface + shell: ip route get 8.8.8.8 | grep -oP 'dev \K\S+' + register: mgmt_iface_result + changed_when: false + run_once: true + delegate_to: "{{ groups['all'] | first }}" + when: mgmt_iface is not defined + + - name: Store management interface + set_fact: + mgmt_iface: "{{ mgmt_iface_result.stdout | trim }}" + run_once: true + when: mgmt_iface is not defined + + - name: Check if running as root + shell: id -u + register: uid_check_multi + changed_when: false + run_once: true + delegate_to: "{{ groups['all'] | first }}" + when: is_root is not defined + + - name: Set root flag + set_fact: + is_root: "{{ uid_check_multi.stdout | trim == '0' }}" + allow_root_flag: "{{ '--allow-run-as-root' if uid_check_multi.stdout | trim == '0' else '' }}" + run_once: true + when: is_root is not defined + + - name: Build MPI config + set_fact: + mpi_host_slots: "{{ groups['all'] | map('extract', hostvars, 'mgmt_ip') | map('regex_replace', '$', ':' ~ gpu_per_node) | join(',') }}" + total_np: "{{ groups['all'] | length * gpu_per_node | int }}" + ib_hca_csv: "{{ hostvars[groups['all'][0]].ib_hca_list | join(',') }}" + run_once: true + + - name: Run all_reduce_perf multi-node + shell: | + set -e + echo "=== MPI Multi-Node ===" > {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "Total Processes: {{ total_np }}" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "Hosts: {{ mpi_host_slots }}" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "Management Interface: {{ mgmt_iface }}" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "IB HCAs: {{ ib_hca_csv }}" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + + CMD="mpirun -np {{ total_np }} -H {{ mpi_host_slots }} {{ allow_root_flag }} \ + -x NCCL_DEBUG_SUBSYS=ALL \ + -x NCCL_NET=IB \ + -x NCCL_NET_GDR_READ=1 \ + -x 
NCCL_NET_GDR_LEVEL=2 \ + -x NCCL_TOPO_DUMP_FILE=/tmp/topo.xml \ + -x NCCL_IB_HCA={{ ib_hca_csv }} \ + -x NCCL_SOCKET_IFNAME={{ mgmt_iface }} \ + -x NCCL_IB_MERGE_NICS=0 \ + {{ nccl_test_dir }}/all_reduce_perf -b 1K -e 1G -f 2 -g 1" + echo "--- all_reduce_perf ---" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "Command: $CMD" >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt + echo "Running: $CMD" + + $CMD >> {{ output_dir }}/{{ timestamp }}/mpi_multi_test.txt 2>&1 + run_once: true + delegate_to: "{{ groups['all'] | first }}" + become: no + when: not skip_mpi_multi | bool and groups['all'] | length > 1 + + # ==================== Collect Results ==================== + # Fetch from all nodes + - name: Fetch results + synchronize: + src: "{{ output_dir }}/{{ timestamp }}/" + dest: "./ib_test_results/{{ timestamp }}/raw/" + mode: pull + ignore_errors: yes + + # Merge IB write reports on controller + - name: Merge IB write reports + local_action: + module: shell + cmd: | + cd ./ib_test_results/{{ timestamp }} + + # Create merged report + echo "================================================================================" > ib_write_report.txt + echo "RDMA 带宽/延迟测试报告" >> ib_write_report.txt + echo "================================================================================" >> ib_write_report.txt + echo "时间: $(date '+%Y-%m-%d %H:%M:%S')" >> ib_write_report.txt + echo "命令: ib_write_bw/lat -d -F --report_gbits --output=bandwidth/latency" >> ib_write_report.txt + echo "" >> ib_write_report.txt + + # BW section + echo "--- 吞吐测试结果 ---" >> ib_write_report.txt + printf "%-16s %-16s %-12s %-12s %-12s\n" "源节点" "目标节点" "源设备" "目标设备" "带宽(Gb/s)" >> ib_write_report.txt + echo "--------------------------------------------------------------------------------" >> ib_write_report.txt + for f in raw/ib_write_*.txt; do + [ -f "$f" ] && grep "^BW " "$f" | while read _ src dst sdev ddev bw; do + printf "%-16s %-16s %-12s %-12s %-12s\n" "$src" "$dst" "$sdev" "$ddev" 
"$bw" + done >> ib_write_report.txt + done + + # LAT section + echo "" >> ib_write_report.txt + echo "--- 延迟测试结果 ---" >> ib_write_report.txt + printf "%-16s %-16s %-12s %-12s %-12s\n" "源节点" "目标节点" "源设备" "目标设备" "延迟(usec)" >> ib_write_report.txt + echo "--------------------------------------------------------------------------------" >> ib_write_report.txt + for f in raw/ib_write_*.txt; do + [ -f "$f" ] && grep "^LAT " "$f" | while read _ src dst sdev ddev lat; do + printf "%-16s %-16s %-12s %-12s %-12s\n" "$src" "$dst" "$sdev" "$ddev" "$lat" + done >> ib_write_report.txt + done + + # Copy ping report if exists + [ -f raw/ping_report.txt ] && cp raw/ping_report.txt . || true + + # Copy MPI reports if exist + [ -f raw/mpi_single_test.txt ] && cp raw/mpi_single_test.txt . || true + [ -f raw/mpi_multi_test.txt ] && cp raw/mpi_multi_test.txt . || true + + # Always exit success + exit 0 + run_once: true + ignore_errors: yes + + - name: Show report location + debug: + msg: | + 测试完成!报告位置: ./ib_test_results/{{ timestamp }}/ + - ib_write_report.txt : RDMA 带宽/延迟测试 (已合并所有节点) + - ping_report.txt : Ping 连通性测试 + - mpi_single_test.txt : MPI 单机测试 + - mpi_multi_test.txt : MPI 多机测试 + run_once: true diff --git a/tools/scripts/ansible/inventory.ini.example b/tools/scripts/ansible/inventory.ini.example new file mode 100644 index 0000000000..a3ba921886 --- /dev/null +++ b/tools/scripts/ansible/inventory.ini.example @@ -0,0 +1,18 @@ +# Ansible inventory file example +# Copy this file to inventory.ini and modify according to your environment + +[all] +node1 ansible_host=10.100.8.11 +node2 ansible_host=10.100.8.12 +node3 ansible_host=10.100.8.13 +node4 ansible_host=10.100.8.14 + +[all:vars] +ansible_user=toor +ansible_ssh_private_key_file=~/.ssh/id_rsa +# Or use password authentication: +# ansible_ssh_pass=your_password +# ansible_become_pass=your_sudo_password + +# Rack configuration (optional, for same-rack/cross-rack testing) +# Define in ib_network_test.yml vars section diff --git 
a/tools/scripts/ansible/rdma_bw_test_client.sh b/tools/scripts/ansible/rdma_bw_test_client.sh new file mode 100644 index 0000000000..ffd919e726 --- /dev/null +++ b/tools/scripts/ansible/rdma_bw_test_client.sh @@ -0,0 +1,495 @@ +#!/bin/bash +# RDMA 带宽测试 - 客户端脚本 +# 环境变量: +# EXCLUDE_NICS - 要排除的网卡列表,用逗号分隔 (例如: eth0,eth1) +# DURATION - 测试持续时间,单位秒 (默认: 10) +# COMMAND - RDMA 测试命令,默认 ib_write_bw,可设置为 ib_read_bw/ib_write_lat/ib_read_lat +# +# 示例: +# ./rdma_bw_test_client.sh 10.10.1.12 10.10.2.12 10.10.3.12 +# EXCLUDE_NICS=eth0,eth1 ./rdma_bw_test_client.sh 10.10.1.12 +# DURATION=20 ./rdma_bw_test_client.sh 10.10.1.12 ... # 测试 20 秒 +# COMMAND=ib_write_lat ./rdma_bw_test_client.sh 10.10.1.12 ... # 延时测试 + +set -e + +# 检查参数 +if [ "$#" -lt 1 ]; then + echo "用法: $0 ... " + echo "" + echo "示例 (测试 8 个网卡):" + echo " $0 10.10.1.12 10.10.17.12 10.10.33.12 10.10.49.12 10.10.65.12 10.10.81.12 10.10.97.12 10.10.113.12" + echo "" + echo "示例 (只测试部分网卡):" + echo " $0 10.10.1.12 10.10.17.12" + echo "" + exit 1 +fi + +# 将参数转为数组 +DEST_IPS=("$@") +TARGET_NAME="对端节点 (${#DEST_IPS[@]} 个 IP)" + +# 默认测试持续时间 (可通过环境变量覆盖) +DURATION="${DURATION:-10}" + +# RDMA 测试命令: 默认 ib_write_bw,可通过环境变量 COMMAND 覆盖 +COMMAND="${COMMAND:-ib_write_bw}" + +echo "接收到 ${#DEST_IPS[@]} 个目标 IP:" +for i in "${!DEST_IPS[@]}"; do + echo " [$i] ${DEST_IPS[$i]}" +done +echo "" + +# 获取 RDMA 设备列表 +get_rdma_devices() { + ls /sys/class/infiniband/ 2>/dev/null | grep "^mlx5_" | sort +} + +# 从 RDMA 设备获取网卡名和 IP 地址 +get_netdev_ip_from_rdma() { + local rdma_dev="$1" + local device_path + local pci_addr + local netdev + local ip_addr + + # 通过 sysfs 获取 PCI 地址 + if [ -e "/sys/class/infiniband/$rdma_dev/device" ]; then + device_path=$(realpath "/sys/class/infiniband/$rdma_dev/device") + pci_addr=$(basename "$device_path") + + # 查找对应的网络接口 + for net in /sys/class/net/*; do + if [ -e "$net/device" ]; then + net_pci=$(basename "$(realpath "$net/device")") + if [ "$net_pci" = "$pci_addr" ]; then + netdev=$(basename "$net") + # 获取 IPv4 地址 + 
ip_addr=$(ip -4 addr show "$netdev" 2>/dev/null | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) + echo "$netdev:$ip_addr" + return 0 + fi + fi + done + fi + + echo "unknown:N/A" + return 1 +} + +# 检查网卡是否在排除列表中 +# 输入: RDMA 设备名 (如 mlx5_0) +# 返回: 0 表示应该排除,1 表示不排除 +should_exclude_device() { + local rdma_dev="$1" + + # 如果没有设置 EXCLUDE_NICS,不排除任何设备 + if [ -z "$EXCLUDE_NICS" ]; then + return 1 + fi + + # 获取该 RDMA 设备对应的网卡名 + local netdev_info + netdev_info=$(get_netdev_ip_from_rdma "$rdma_dev") + local netdev + netdev=$(echo "$netdev_info" | cut -d: -f1) + + # 将 EXCLUDE_NICS 按逗号分割 + IFS=',' read -ra EXCLUDE_LIST <<< "$EXCLUDE_NICS" + + # 检查网卡名是否在排除列表中 + for exclude_nic in "${EXCLUDE_LIST[@]}"; do + # 去除空格 + exclude_nic=$(echo "$exclude_nic" | xargs) + if [ "$netdev" = "$exclude_nic" ]; then + return 0 # 应该排除 + fi + done + + return 1 # 不排除 +} + +# 创建结果目录 +RESULT_DIR="./rdma_results_$(date '+%Y%m%d_%H%M%S')" +mkdir -p "$RESULT_DIR" + +echo "=== RDMA 带宽测试客户端 ===" +echo "本地节点: $(hostname) ($(hostname -I | awk '{print $1}'))" +echo "目标节点: $TARGET_NAME" +echo "结果目录: $RESULT_DIR" +echo "" + +# 获取所有本地 RDMA 设备 +LOCAL_RDMA_DEVS=($(get_rdma_devices)) + +if [ ${#LOCAL_RDMA_DEVS[@]} -eq 0 ]; then + echo "错误: 未找到本地 RDMA 设备" + exit 1 +fi + +# 显示配置信息 +echo "测试配置:" +echo " 消息大小: $SIZE_HUMAN ($MESSAGE_SIZE 字节)" +echo " 测试命令: $COMMAND" +if [ -n "$EXCLUDE_NICS" ]; then + echo " 排除网卡: $EXCLUDE_NICS" +fi +echo "" + +echo "本地 RDMA 设备:" +ACTIVE_LOCAL_DEVS=() +for dev in "${LOCAL_RDMA_DEVS[@]}"; do + netdev_info=$(get_netdev_ip_from_rdma "$dev") + netdev=$(echo "$netdev_info" | cut -d: -f1) + ip=$(echo "$netdev_info" | cut -d: -f2) + + if should_exclude_device "$dev"; then + printf " %-10s -> %-8s (%s) [已排除]\n" "$dev" "$netdev" "$ip" + else + printf " %-10s -> %-8s (%s)\n" "$dev" "$netdev" "$ip" + ACTIVE_LOCAL_DEVS+=("$dev") + fi +done +echo "" + +if [ ${#ACTIVE_LOCAL_DEVS[@]} -eq 0 ]; then + echo "错误: 所有本地设备都被排除,没有可用的设备" + exit 1 +fi + +echo "将用于测试的本地设备: ${#ACTIVE_LOCAL_DEVS[@]} 个" +echo "" + +# 
判断是带宽测试还是延时测试 +IS_LATENCY_TEST=0 + +# 根据命令类型选择输出格式和测试参数 +# 使用 -D 持续时间代替 -s/-n 参数 +OPTS="-D $DURATION" +if [ "$COMMAND" = "ib_write_lat" ] || [ "$COMMAND" = "ib_read_lat" ]; then + IS_LATENCY_TEST=1 + OPTS+=" --output=latency" + + SUMMARY_FILE="$RESULT_DIR/latency_summary.txt" + echo "RDMA 延时测试结果汇总" > "$SUMMARY_FILE" + echo "测试时间: $(date)" >> "$SUMMARY_FILE" + echo "本地节点: $(hostname)" >> "$SUMMARY_FILE" + echo "目标节点: $TARGET_NAME" >> "$SUMMARY_FILE" + echo "测试持续时间: ${DURATION} 秒" >> "$SUMMARY_FILE" + echo "测试命令: ${COMMAND} -d ${OPTS}" >> "$SUMMARY_FILE" + echo "" >> "$SUMMARY_FILE" + printf "%-18s %-18s %-26s %-26s %-15s %s\n" "源RDMA设备" "源网卡" "源IP" "目的IP" "测试类型" "结果" >> "$SUMMARY_FILE" + echo "----------------------------------------------------------------------------------------------------" >> "$SUMMARY_FILE" +else + OPTS+=" --output=bandwidth" + + SUMMARY_FILE="$RESULT_DIR/summary.txt" + echo "RDMA 带宽测试结果汇总" > "$SUMMARY_FILE" + echo "测试时间: $(date)" >> "$SUMMARY_FILE" + echo "本地节点: $(hostname)" >> "$SUMMARY_FILE" + echo "目标节点: $TARGET_NAME" >> "$SUMMARY_FILE" + echo "测试持续时间: ${DURATION} 秒" >> "$SUMMARY_FILE" + echo "测试命令: ${COMMAND} -d ${OPTS}" >> "$SUMMARY_FILE" + echo "" >> "$SUMMARY_FILE" + printf "%-18s %-18s %-26s %-26s %-15s %s\n" "源RDMA设备" "源网卡" "源IP" "目的IP" "测试类型" "结果" >> "$SUMMARY_FILE" + echo "----------------------------------------------------------------------------------------------------" >> "$SUMMARY_FILE" +fi +OPTS+=" --report_gbits" + + + +# 开始测试 +total_tests=$((${#ACTIVE_LOCAL_DEVS[@]} * ${#DEST_IPS[@]})) +current_test=0 +success_count=0 +fail_count=0 + +# 用于计算平均值的累加变量(区分同轨/跨轨) +total_bw=0 +total_lat=0 +valid_samples=0 + +# 同轨统计 +total_bw_same=0 +total_lat_same=0 +valid_samples_same=0 +success_count_same=0 +fail_count_same=0 + +# 跨轨统计 +total_bw_cross=0 +total_lat_cross=0 +valid_samples_cross=0 +success_count_cross=0 +fail_count_cross=0 + +echo "开始带宽测试 (${#ACTIVE_LOCAL_DEVS[@]}x${#DEST_IPS[@]} = $total_tests 次)..." 
+echo "" + +for src_idx in "${!ACTIVE_LOCAL_DEVS[@]}"; do + SRC_RDMA="${ACTIVE_LOCAL_DEVS[$src_idx]}" + + # 获取源设备信息 + netdev_info=$(get_netdev_ip_from_rdma "$SRC_RDMA") + SRC_NETDEV=$(echo "$netdev_info" | cut -d: -f1) + SRC_IP=$(echo "$netdev_info" | cut -d: -f2) + + echo "========================================" + echo "源设备: $SRC_RDMA ($SRC_NETDEV - $SRC_IP)" + echo "========================================" + + for dst_idx in "${!DEST_IPS[@]}"; do + DEST_IP="${DEST_IPS[$dst_idx]}" + DEST_NETDEV="ib$dst_idx" + + current_test=$((current_test + 1)) + + # 判断是同轨还是跨轨 + if [ "$src_idx" -eq "$dst_idx" ]; then + TEST_TYPE="同轨" + else + TEST_TYPE="跨轨" + fi + + echo -n "[$current_test/$total_tests] $SRC_RDMA -> $DEST_IP ($DEST_NETDEV) [$TEST_TYPE] ... " + + # 打印本次测试将要执行的客户端命令 + CLIENT_CMD="$COMMAND -d $SRC_RDMA $DEST_IP $OPTS -F" + echo "" + echo " 客户端命令: $CLIENT_CMD" + + # 执行测试,支持最多3次重试 + MAX_RETRIES=3 + RETRY_COUNT=0 + TEST_SUCCESS=0 + + while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ $TEST_SUCCESS -eq 0 ]; do + TEMP_OUTPUT=$(mktemp) + TEST_EXIT_CODE=0 + + if [ $RETRY_COUNT -gt 0 ]; then + echo " 重试 $RETRY_COUNT/$((MAX_RETRIES-1))..." + sleep 2 + fi + + eval "$CLIENT_CMD" > "$TEMP_OUTPUT" 2>&1 || TEST_EXIT_CODE=$? 
+ + if [ $TEST_EXIT_CODE -eq 0 ]; then + TEST_SUCCESS=1 + + if [ $IS_LATENCY_TEST -eq 1 ]; then + # 延时测试:提取延时结果 (usec) + LAT=$(grep -oP '\d+\.\d+' "$TEMP_OUTPUT" | tail -1 2>/dev/null || echo "N/A") + if [ "$LAT" != "N/A" ]; then + echo "✅ 成功 (延时: ${LAT} usec)" + RESULT="成功 (延时: ${LAT} usec)" + # 累加延时用于计算平均值 + total_lat=$(awk "BEGIN {printf \"%.2f\", $total_lat + $LAT}") + valid_samples=$((valid_samples + 1)) + # 区分同轨/跨轨统计 + if [ "$TEST_TYPE" = "同轨" ]; then + total_lat_same=$(awk "BEGIN {printf \"%.2f\", $total_lat_same + $LAT}") + valid_samples_same=$((valid_samples_same + 1)) + else + total_lat_cross=$(awk "BEGIN {printf \"%.2f\", $total_lat_cross + $LAT}") + valid_samples_cross=$((valid_samples_cross + 1)) + fi + else + echo "✅ 成功" + RESULT="成功" + fi + else + # 带宽测试:提取带宽结果 (Gb/s) + BW=$(grep -oP '\d+\.\d+' "$TEMP_OUTPUT" | tail -1 2>/dev/null || echo "N/A") + if [ "$BW" != "N/A" ]; then + BW_GBPS=$(awk "BEGIN {printf \"%.2f\", $BW}") + echo "✅ 成功 (${BW_GBPS} Gb/s)" + RESULT="成功 (${BW_GBPS} Gb/s)" + # 累加带宽用于计算平均值 + total_bw=$(awk "BEGIN {printf \"%.2f\", $total_bw + $BW}") + valid_samples=$((valid_samples + 1)) + # 区分同轨/跨轨统计 + if [ "$TEST_TYPE" = "同轨" ]; then + total_bw_same=$(awk "BEGIN {printf \"%.2f\", $total_bw_same + $BW}") + valid_samples_same=$((valid_samples_same + 1)) + else + total_bw_cross=$(awk "BEGIN {printf \"%.2f\", $total_bw_cross + $BW}") + valid_samples_cross=$((valid_samples_cross + 1)) + fi + else + echo "✅ 成功 (${BW} Gb/s)" + RESULT="成功 (${BW} Gb/s)" + fi + fi + success_count=$((success_count + 1)) + # 区分同轨/跨轨成功计数 + if [ "$TEST_TYPE" = "同轨" ]; then + success_count_same=$((success_count_same + 1)) + else + success_count_cross=$((success_count_cross + 1)) + fi + else + RETRY_COUNT=$((RETRY_COUNT + 1)) + + # 如果是最后一次重试仍然失败 + if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then + # 测试失败,提取原始错误信息 + if [ $TEST_EXIT_CODE -eq 124 ]; then + ERROR_MSG="超时 (30秒)" + else + # 提取最后几行非空错误信息 + ERROR_MSG=$(grep -v "^$" "$TEMP_OUTPUT" | tail -3 | tr '\n' ' ' | cut 
-c1-200) + if [ -z "$ERROR_MSG" ]; then + ERROR_MSG="退出码: $TEST_EXIT_CODE" + fi + fi + + echo "❌ 失败 (重试 $((MAX_RETRIES)) 次后仍失败)" + echo " 错误: $ERROR_MSG" + RESULT="失败: $ERROR_MSG" + fail_count=$((fail_count + 1)) + # 区分同轨/跨轨失败计数 + if [ "$TEST_TYPE" = "同轨" ]; then + fail_count_same=$((fail_count_same + 1)) + else + fail_count_cross=$((fail_count_cross + 1)) + fi + fi + fi + + # 删除临时文件 + rm -f "$TEMP_OUTPUT" + done + + # 记录到汇总文件 + printf "%-12s %-10s %-18s %-18s %-15s %s\n" \ + "$SRC_RDMA" "$SRC_NETDEV" "$SRC_IP" "$DEST_IP" "$TEST_TYPE" "$RESULT" >> "$SUMMARY_FILE" + + # 等待 1 秒,让服务端准备好下一次测试 + sleep 1 + done + + echo "" +done + +# 输出测试统计 +echo "========================================" +echo "测试完成统计" +echo "========================================" +echo "总测试数: $total_tests" +echo "成功: $success_count" +echo "失败: $fail_count" +echo "成功率: $(awk "BEGIN {printf \"%.2f\", ($success_count/$total_tests)*100}")%" +echo "" + +# 计算并输出总体平均值 +if [ $valid_samples -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat=$(awk "BEGIN {printf \"%.2f\", $total_lat / $valid_samples}") + echo "总体平均延时: ${avg_lat} usec (基于 $valid_samples 个有效样本)" + else + avg_bw=$(awk "BEGIN {printf \"%.2f\", $total_bw / $valid_samples}") + echo "总体平均带宽: ${avg_bw} Gb/s (基于 $valid_samples 个有效样本)" + fi +fi + +# 输出同轨统计 +echo "" +echo "--- 同轨测试统计 ---" +total_same=$((success_count_same + fail_count_same)) +if [ $total_same -gt 0 ]; then + echo "测试数: $total_same (成功: $success_count_same, 失败: $fail_count_same)" + if [ $valid_samples_same -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat_same=$(awk "BEGIN {printf \"%.2f\", $total_lat_same / $valid_samples_same}") + echo "同轨平均延时: ${avg_lat_same} usec (基于 $valid_samples_same 个有效样本)" + else + avg_bw_same=$(awk "BEGIN {printf \"%.2f\", $total_bw_same / $valid_samples_same}") + echo "同轨平均带宽: ${avg_bw_same} Gb/s (基于 $valid_samples_same 个有效样本)" + fi + fi +else + echo "无同轨测试" +fi + +# 输出跨轨统计 +echo "" +echo "--- 跨轨测试统计 ---" 
+total_cross=$((success_count_cross + fail_count_cross)) +if [ $total_cross -gt 0 ]; then + echo "测试数: $total_cross (成功: $success_count_cross, 失败: $fail_count_cross)" + if [ $valid_samples_cross -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat_cross=$(awk "BEGIN {printf \"%.2f\", $total_lat_cross / $valid_samples_cross}") + echo "跨轨平均延时: ${avg_lat_cross} usec (基于 $valid_samples_cross 个有效样本)" + else + avg_bw_cross=$(awk "BEGIN {printf \"%.2f\", $total_bw_cross / $valid_samples_cross}") + echo "跨轨平均带宽: ${avg_bw_cross} Gb/s (基于 $valid_samples_cross 个有效样本)" + fi + fi +else + echo "无跨轨测试" +fi + +echo "" +echo "结果已保存到: $SUMMARY_FILE" + +# 追加统计到汇总文件 +echo "" >> "$SUMMARY_FILE" +echo "=======================================" >> "$SUMMARY_FILE" +echo "测试统计" >> "$SUMMARY_FILE" +echo "=======================================" >> "$SUMMARY_FILE" +echo "总测试数: $total_tests" >> "$SUMMARY_FILE" +echo "成功: $success_count" >> "$SUMMARY_FILE" +echo "失败: $fail_count" >> "$SUMMARY_FILE" +echo "成功率: $(awk "BEGIN {printf \"%.2f\", ($success_count/$total_tests)*100}")%" >> "$SUMMARY_FILE" +echo "" >> "$SUMMARY_FILE" + +# 追加总体平均值到汇总文件 +if [ $valid_samples -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat=$(awk "BEGIN {printf \"%.2f\", $total_lat / $valid_samples}") + echo "总体平均延时: ${avg_lat} usec (基于 $valid_samples 个有效样本)" >> "$SUMMARY_FILE" + else + avg_bw=$(awk "BEGIN {printf \"%.2f\", $total_bw / $valid_samples}") + echo "总体平均带宽: ${avg_bw} Gb/s (基于 $valid_samples 个有效样本)" >> "$SUMMARY_FILE" + fi +fi + +# 追加同轨统计到汇总文件 +echo "" >> "$SUMMARY_FILE" +echo "--- 同轨测试统计 ---" >> "$SUMMARY_FILE" +if [ $total_same -gt 0 ]; then + echo "测试数: $total_same (成功: $success_count_same, 失败: $fail_count_same)" >> "$SUMMARY_FILE" + if [ $valid_samples_same -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat_same=$(awk "BEGIN {printf \"%.2f\", $total_lat_same / $valid_samples_same}") + echo "同轨平均延时: ${avg_lat_same} usec (基于 $valid_samples_same 个有效样本)" >> "$SUMMARY_FILE" + else 
+ avg_bw_same=$(awk "BEGIN {printf \"%.2f\", $total_bw_same / $valid_samples_same}") + echo "同轨平均带宽: ${avg_bw_same} Gb/s (基于 $valid_samples_same 个有效样本)" >> "$SUMMARY_FILE" + fi + fi +else + echo "无同轨测试" >> "$SUMMARY_FILE" +fi + +# 追加跨轨统计到汇总文件 +echo "" >> "$SUMMARY_FILE" +echo "--- 跨轨测试统计 ---" >> "$SUMMARY_FILE" +if [ $total_cross -gt 0 ]; then + echo "测试数: $total_cross (成功: $success_count_cross, 失败: $fail_count_cross)" >> "$SUMMARY_FILE" + if [ $valid_samples_cross -gt 0 ]; then + if [ $IS_LATENCY_TEST -eq 1 ]; then + avg_lat_cross=$(awk "BEGIN {printf \"%.2f\", $total_lat_cross / $valid_samples_cross}") + echo "跨轨平均延时: ${avg_lat_cross} usec (基于 $valid_samples_cross 个有效样本)" >> "$SUMMARY_FILE" + else + avg_bw_cross=$(awk "BEGIN {printf \"%.2f\", $total_bw_cross / $valid_samples_cross}") + echo "跨轨平均带宽: ${avg_bw_cross} Gb/s (基于 $valid_samples_cross 个有效样本)" >> "$SUMMARY_FILE" + fi + fi +else + echo "无跨轨测试" >> "$SUMMARY_FILE" +fi diff --git a/tools/scripts/ansible/rdma_bw_test_server.sh b/tools/scripts/ansible/rdma_bw_test_server.sh new file mode 100644 index 0000000000..b846a6a040 --- /dev/null +++ b/tools/scripts/ansible/rdma_bw_test_server.sh @@ -0,0 +1,207 @@ +#!/bin/bash +# RDMA 带宽测试 - 服务端脚本 +# 用法: ./rdma_bw_test_server.sh [rdma_dev1 rdma_dev2 ...] 
+# +# 环境变量: +# EXCLUDE_NICS - 要排除的网卡列表,用逗号分隔 (例如: eth0,eth1) +# DURATION - 测试持续时间,单位秒 (默认: 10) +# COMMAND - RDMA 测试命令,默认 ib_write_bw,可设置为 ib_read_bw/ib_write_lat/ib_read_lat +# +# 示例: +# ./rdma_bw_test_server.sh mlx5_0 mlx5_1 # 只启动 mlx5_0 和 mlx5_1 +# ./rdma_bw_test_server.sh # 启动所有设备 +# EXCLUDE_NICS=eth0,eth1 ./rdma_bw_test_server.sh # 排除 eth0 和 eth1 对应的设备 +# DURATION=20 ./rdma_bw_test_server.sh # 测试 20 秒 + +set -e + +# RDMA 测试命令: 默认 ib_write_bw,可通过环境变量 COMMAND 覆盖 +COMMAND="${COMMAND:-ib_write_bw}" + +# 默认测试持续时间 (可通过环境变量覆盖) +DURATION="${DURATION:-10}" + +# 获取 RDMA 设备列表 +get_rdma_devices() { + ls /sys/class/infiniband/ 2>/dev/null | grep "^mlx5_" +} + +# 从 RDMA 设备获取 IP 地址 +get_ip_from_rdma_dev() { + local rdma_dev="$1" + local device_path + local pci_addr + local netdev + local ip_addr + + # 通过 sysfs 获取 PCI 地址 + if [ -e "/sys/class/infiniband/$rdma_dev/device" ]; then + device_path=$(realpath "/sys/class/infiniband/$rdma_dev/device") + pci_addr=$(basename "$device_path") + + # 查找对应的网络接口 + for net in /sys/class/net/*; do + if [ -e "$net/device" ]; then + net_pci=$(basename "$(realpath "$net/device")") + if [ "$net_pci" = "$pci_addr" ]; then + netdev=$(basename "$net") + # 获取 IPv4 地址 + ip_addr=$(ip -4 addr show "$netdev" 2>/dev/null | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) + if [ -n "$ip_addr" ]; then + echo "$netdev:$ip_addr" + return 0 + fi + fi + fi + done + fi + + echo "unknown:N/A" + return 1 +} + +# 检查网卡是否在排除列表中 +# 输入: RDMA 设备名 (如 mlx5_0) +# 返回: 0 表示应该排除,1 表示不排除 +should_exclude_device() { + local rdma_dev="$1" + + # 如果没有设置 EXCLUDE_NICS,不排除任何设备 + if [ -z "$EXCLUDE_NICS" ]; then + return 1 + fi + + # 获取该 RDMA 设备对应的网卡名 + local netdev_info + netdev_info=$(get_ip_from_rdma_dev "$rdma_dev") + local netdev + netdev=$(echo "$netdev_info" | cut -d: -f1) + + # 将 EXCLUDE_NICS 按逗号分割 + IFS=',' read -ra EXCLUDE_LIST <<< "$EXCLUDE_NICS" + + # 检查网卡名是否在排除列表中 + for exclude_nic in "${EXCLUDE_LIST[@]}"; do + # 去除空格 + exclude_nic=$(echo "$exclude_nic" | xargs) + if [ 
"$netdev" = "$exclude_nic" ]; then + return 0 # 应该排除 + fi + done + + return 1 # 不排除 +} + +echo "=== RDMA 带宽测试服务端 ===" +echo "节点信息: $(hostname) ($(hostname -I | awk '{print $1}'))" +echo "测试命令: $COMMAND" +echo "" + +# 判断是使用参数指定的设备还是所有设备 +if [ "$#" -gt 0 ]; then + # 使用命令行参数指定的设备 + RDMA_DEVS=("$@") + echo "使用指定的 RDMA 设备: ${RDMA_DEVS[*]}" + + # 验证设备是否存在 + for dev in "${RDMA_DEVS[@]}"; do + if [ ! -e "/sys/class/infiniband/$dev" ]; then + echo "错误: RDMA 设备 $dev 不存在" + exit 1 + fi + done +else + # 获取所有 RDMA 设备 + RDMA_DEVS=($(get_rdma_devices)) + + if [ ${#RDMA_DEVS[@]} -eq 0 ]; then + echo "错误: 未找到 RDMA 设备" + exit 1 + fi + + echo "使用所有 RDMA 设备" +fi + +echo "" + +# 显示排除信息 +if [ -n "$EXCLUDE_NICS" ]; then + echo "排除的网卡: $EXCLUDE_NICS" + echo "" +fi + +echo "发现 ${#RDMA_DEVS[@]} 个 RDMA 设备:" +ACTIVE_DEVS=() +for dev in "${RDMA_DEVS[@]}"; do + netdev_info=$(get_ip_from_rdma_dev "$dev") + netdev=$(echo "$netdev_info" | cut -d: -f1) + ip=$(echo "$netdev_info" | cut -d: -f2) + + if should_exclude_device "$dev"; then + printf " %-10s -> %-8s (%s) [已排除]\n" "$dev" "$netdev" "$ip" + else + printf " %-10s -> %-8s (%s)\n" "$dev" "$netdev" "$ip" + ACTIVE_DEVS+=("$dev") + fi +done +echo "" + +if [ ${#ACTIVE_DEVS[@]} -eq 0 ]; then + echo "错误: 所有设备都被排除,没有可用的设备" + exit 1 +fi + +echo "将启动服务的设备: ${#ACTIVE_DEVS[@]} 个" +echo "" + +# 每个设备需要启动 8 次服务(因为每次客户端连接后服务端会退出) +TESTS_PER_DEVICE=8 + +echo "开始启动服务端 (每个设备启动 $TESTS_PER_DEVICE 次)..." +echo "按 Ctrl+C 停止" +echo "" + +# 循环启动服务(只启动未被排除的设备) +for rdma_dev in "${ACTIVE_DEVS[@]}"; do + netdev_info=$(get_ip_from_rdma_dev "$rdma_dev") + netdev=$(echo "$netdev_info" | cut -d: -f1) + ip=$(echo "$netdev_info" | cut -d: -f2) + + echo "----------------------------------------" + echo "设备: $rdma_dev ($netdev - $ip)" + echo "----------------------------------------" + + for i in $(seq 1 $TESTS_PER_DEVICE); do + echo "[$(date '+%H:%M:%S')] 启动第 $i/$TESTS_PER_DEVICE 次服务..." 
+ + # 根据命令类型选择输出格式和测试参数 + # 使用 -D 持续时间代替 -s/-n 参数 + OPTS="-D $DURATION" + if [ "$COMMAND" = "ib_write_lat" ] || [ "$COMMAND" = "ib_read_lat" ]; then + OPTS+=" --output=latency" + else + OPTS+=" --output=bandwidth" + fi + OPTS+=" --report_gbits" + + # 打印本次测试将要执行的服务端命令 + SERVER_CMD="$COMMAND -d $rdma_dev $OPTS -F" + echo " 服务端命令: $SERVER_CMD" + + # 启动 RDMA 测试服务端 + if eval "$SERVER_CMD" 2>&1; then + echo "[$(date '+%H:%M:%S')] 第 $i 次测试完成" + else + echo "[$(date '+%H:%M:%S')] 第 $i 次测试失败或被中断" + fi + + # 等待 1 秒再启动下一次 + if [ $i -lt $TESTS_PER_DEVICE ]; then + sleep 1 + fi + done + + echo "" +done + +echo "=== 所有服务端测试完成 ===" diff --git a/tools/scripts/ansible/run_script.yml b/tools/scripts/ansible/run_script.yml new file mode 100644 index 0000000000..884f0f68ce --- /dev/null +++ b/tools/scripts/ansible/run_script.yml @@ -0,0 +1,239 @@ +--- +# Ansible playbook for configuring RDMA NIC mode and enabling IPoIB +# +# Usage examples: +# # Configure GPU NICs to InfiniBand mode (default) +# ansible-playbook -i inventory.ini run_script.yml +# +# # Configure all NICs to RoCE mode +# ansible-playbook -i inventory.ini run_script.yml -e "rdma_mode=roce" +# +# # Configure GPU NICs to IB, other NICs to RoCE +# ansible-playbook -i inventory.ini run_script.yml -e "gpu_rdma_mode=infiniband other_rdma_mode=roce" +# +# # Query current configuration only +# ansible-playbook -i inventory.ini run_script.yml -e "query_only=true" + +- name: Configure RDMA NIC mode and enable IPoIB + hosts: all + become: yes + remote_user: toor + vars: + # Script location + script_path: /usr/local/src/daocloud/setNicRdmaMode.sh + + # RDMA mode configuration (Scenario 1: unified mode for all NICs) + # Options: "roce" or "infiniband" + rdma_mode: "" + + # RDMA mode configuration (Scenario 2: separate mode for GPU and other NICs) + # If set, these take precedence over rdma_mode + gpu_rdma_mode: "infiniband" + other_rdma_mode: "" + + # Query only mode (no configuration changes) + query_only: false + + tasks: + # 
==================== Query Mode ==================== + - name: Query current RDMA configuration + shell: "{{ script_path }} q" + register: query_result + when: query_only | bool + ignore_errors: yes + + - name: Display current configuration + debug: + msg: | + === {{ inventory_hostname }} - Current RDMA Configuration === + {{ query_result.stdout }} + when: query_only | bool + + - name: End play for query mode + meta: end_host + when: query_only | bool + + # ==================== Configuration Mode ==================== + - name: Check if script exists + stat: + path: "{{ script_path }}" + register: script_stat + + - name: Fail if script does not exist + fail: + msg: "Script {{ script_path }} does not exist on {{ inventory_hostname }}" + when: not script_stat.stat.exists + + - name: Ensure script is executable + file: + path: "{{ script_path }}" + mode: '0755' + + # Build environment variables for the script + - name: Set environment variables for RDMA configuration + set_fact: + rdma_env: >- + {% if rdma_mode != "" %} + RDMA_MODE='{{ rdma_mode }}' + {% else %} + {% if gpu_rdma_mode != "" %}GPU_RDMA_MODE='{{ gpu_rdma_mode }}'{% endif %} + {% if other_rdma_mode != "" %} OTHER_RDMA_MODE='{{ other_rdma_mode }}'{% endif %} + {% endif %} + + - name: Display configuration to be applied + debug: + msg: "Configuring RDMA on {{ inventory_hostname }} with: {{ rdma_env | trim }}" + + # Execute the RDMA mode configuration script + - name: Execute setNicRdmaMode.sh + shell: "{{ rdma_env | trim }} {{ script_path }}" + register: rdma_result + ignore_errors: yes + + - name: Display RDMA configuration result + debug: + msg: | + === {{ inventory_hostname }} - RDMA Configuration Result === + {{ rdma_result.stdout }} + {% if rdma_result.stderr %} + STDERR: {{ rdma_result.stderr }} + {% endif %} + + # ==================== Load Kernel Modules ==================== + # Always load NVIDIA GPU-related modules + - name: Load nvidia_peermem kernel module + modprobe: + name: nvidia_peermem + 
state: present + ignore_errors: yes + register: nvidia_peermem_result + + - name: Display nvidia_peermem module status + debug: + msg: "nvidia_peermem module: {{ 'loaded' if nvidia_peermem_result is succeeded else 'not available or failed' }}" + + - name: Load gdrdrv kernel module + modprobe: + name: gdrdrv + state: present + ignore_errors: yes + register: gdrdrv_result + + - name: Display gdrdrv module status + debug: + msg: "gdrdrv module: {{ 'loaded' if gdrdrv_result is succeeded else 'not available or failed' }}" + + # Determine if we're in InfiniBand mode + - name: Check if InfiniBand mode is configured + set_fact: + is_infiniband_mode: >- + {{ (rdma_mode == "infiniband") or (gpu_rdma_mode == "infiniband") or (other_rdma_mode == "infiniband") }} + + - name: Display InfiniBand mode detection + debug: + msg: "InfiniBand mode detected: {{ is_infiniband_mode }}" + + # Load ib_ipoib only for InfiniBand mode + - name: Check if ib_ipoib module is loaded + shell: lsmod | grep -q ib_ipoib + register: ipoib_loaded + ignore_errors: yes + changed_when: false + when: is_infiniband_mode | bool + + - name: Load ib_ipoib kernel module (InfiniBand mode only) + modprobe: + name: ib_ipoib + state: present + when: + - is_infiniband_mode | bool + - ipoib_loaded.rc != 0 + register: ipoib_load_result + + - name: Verify ib_ipoib module is loaded + shell: lsmod | grep ib_ipoib + register: ipoib_verify + changed_when: false + when: is_infiniband_mode | bool + + - name: Display ib_ipoib module status + debug: + msg: "ib_ipoib module loaded: {{ ipoib_verify.stdout }}" + when: is_infiniband_mode | bool + + - name: Skip ib_ipoib module (not in InfiniBand mode) + debug: + msg: "Skipping ib_ipoib module - not in InfiniBand mode" + when: not (is_infiniband_mode | bool) + + # ==================== Bring Up IB Interfaces ==================== + - name: Find all IB interfaces + shell: | + # Find interfaces with InfiniBand link layer + for dev in /sys/class/infiniband/*; do + if [ -d "$dev" ]; then 
+ dev_name=$(basename "$dev") + for port in "$dev"/ports/*; do + if [ -d "$port" ]; then + link_layer=$(cat "$port/link_layer" 2>/dev/null) + if [ "$link_layer" = "InfiniBand" ]; then + # Find corresponding network interface (ib*) + port_num=$(basename "$port") + # Look for IPoIB interface + for iface in /sys/class/net/ib*; do + if [ -d "$iface" ]; then + echo $(basename "$iface") + fi + done + fi + fi + done + fi + done | sort -u + register: ib_interfaces + changed_when: false + + - name: Display found IB interfaces + debug: + msg: "Found IB interfaces: {{ ib_interfaces.stdout_lines }}" + when: ib_interfaces.stdout_lines | length > 0 + + - name: Bring up IB interfaces + shell: "ip link set {{ item }} up" + loop: "{{ ib_interfaces.stdout_lines }}" + when: ib_interfaces.stdout_lines | length > 0 + ignore_errors: yes + register: iface_up_result + + - name: Display IB interface status + shell: | + echo "=== IB Interface Status ===" + for iface in {{ ib_interfaces.stdout_lines | join(' ') }}; do + echo "--- $iface ---" + ip addr show $iface 2>/dev/null | head -5 + done + register: iface_status + when: ib_interfaces.stdout_lines | length > 0 + changed_when: false + + - name: Show IB interface status + debug: + msg: "{{ iface_status.stdout }}" + when: ib_interfaces.stdout_lines | length > 0 + + # ==================== Final Verification ==================== + - name: Verify RDMA link layer status + shell: | + echo "=== RDMA Link Layer Status ===" + for dev in $(ls /sys/class/infiniband/ 2>/dev/null); do + echo "--- $dev ---" + ibstat $dev 2>/dev/null | grep -E "State|Physical state|Link layer" || echo " Unable to get status" + done + register: rdma_status + changed_when: false + + - name: Display final RDMA status + debug: + msg: | + === {{ inventory_hostname }} - Final Status === + {{ rdma_status.stdout }} diff --git a/tools/scripts/ansible/setup_ssh_keys.sh b/tools/scripts/ansible/setup_ssh_keys.sh new file mode 100755 index 0000000000..c42648928d --- /dev/null +++ 
b/tools/scripts/ansible/setup_ssh_keys.sh @@ -0,0 +1,368 @@ +#!/bin/bash +# SSH 免密登录批量检查和配置脚本 +# 用法: ./setup_ssh_keys.sh [inventory_file] [check|setup] + +set -e + +INVENTORY_FILE="${1:-inventory.ini}" +ACTION="${2:-check}" +SSH_USER="${3:-toor}" +SSH_PASS="${4:-root@123}" +SSH_KEY="$HOME/.ssh/id_rsa" + +# 颜色输出 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log_ok() { echo -e "${GREEN}[OK]${NC} $1"; } +log_fail() { echo -e "${RED}[FAIL]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_info() { echo -e "[INFO] $1"; } + +# 从 inventory 文件提取主机列表 +get_hosts() { + if [ ! -f "$INVENTORY_FILE" ]; then + echo "错误: 找不到 inventory 文件: $INVENTORY_FILE" >&2 + exit 1 + fi + # 只提取 IP 地址(跳过注释、组名和变量定义) + grep -oE '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' "$INVENTORY_FILE" | \ + sort -u +} + +# 检查本地 SSH 密钥 +check_local_key() { + log_info "检查本地 SSH 密钥..." + if [ -f "$SSH_KEY" ]; then + log_ok "本地密钥存在: $SSH_KEY" + return 0 + else + log_warn "本地密钥不存在: $SSH_KEY" + return 1 + fi +} + +# 生成本地 SSH 密钥 +generate_local_key() { + log_info "生成本地 SSH 密钥..." + if [ -f "$SSH_KEY" ]; then + log_ok "密钥已存在,跳过生成" + return 0 + fi + ssh-keygen -t rsa -b 4096 -f "$SSH_KEY" -N "" -q + log_ok "密钥生成完成: $SSH_KEY" +} + +# 检查单个主机的免密状态 +check_host() { + local host="$1" + local timeout=5 + + # 尝试免密登录 + if ssh -o BatchMode=yes -o ConnectTimeout=$timeout -o StrictHostKeyChecking=no "${SSH_USER}@${host}" "exit" 2>/dev/null; then + log_ok "$host - 免密登录正常 (用户: $SSH_USER)" + return 0 + else + log_fail "$host - 免密登录失败 (用户: $SSH_USER)" + return 1 + fi +} + +# 配置单个主机的免密登录 +setup_host() { + local host="$1" + local timeout=10 + + # 先检查是否已经可以免密登录 + if ssh -o BatchMode=yes -o ConnectTimeout=$timeout -o StrictHostKeyChecking=no "${SSH_USER}@${host}" "exit" 2>/dev/null; then + log_ok "$host - 已配置免密登录 (用户: $SSH_USER)" + return 0 + fi + + log_info "$host - 配置免密登录 (用户: $SSH_USER)..." 
+ + # 使用 sshpass + ssh-copy-id 复制公钥 + if command -v sshpass &>/dev/null; then + if sshpass -p "$SSH_PASS" ssh-copy-id -o StrictHostKeyChecking=no -o ConnectTimeout=$timeout "${SSH_USER}@${host}" 2>/dev/null; then + log_ok "$host - 免密登录配置成功" + return 0 + else + log_fail "$host - 免密登录配置失败" + return 1 + fi + else + log_warn "sshpass 未安装,请先安装: apt install sshpass 或 yum install sshpass" + log_info "尝试手动方式 (需要输入密码: $SSH_PASS)..." + if ssh-copy-id -o StrictHostKeyChecking=no -o ConnectTimeout=$timeout "${SSH_USER}@${host}"; then + log_ok "$host - 免密登录配置成功" + return 0 + else + log_fail "$host - 免密登录配置失败" + return 1 + fi + fi +} + +# 批量检查 +batch_check() { + log_info "========== 批量检查 SSH 免密登录 ==========" + local hosts + hosts=$(get_hosts) + local total=0 + local success=0 + local failed=0 + local failed_hosts="" + + check_local_key + echo "" + + for host in $hosts; do + total=$((total + 1)) + if check_host "$host"; then + success=$((success + 1)) + else + failed=$((failed + 1)) + failed_hosts="$failed_hosts $host" + fi + done + + echo "" + log_info "========== 检查结果 ==========" + log_info "总计: $total 台主机" + log_ok "成功: $success 台" + [ $failed -gt 0 ] && log_fail "失败: $failed 台" + + if [ $failed -gt 0 ]; then + echo "" + log_warn "失败的主机列表:" + for h in $failed_hosts; do + log_warn " - $h" + done + fi + + return $failed +} + +# 批量配置 +batch_setup() { + log_info "========== 批量配置 SSH 免密登录 ==========" + local hosts + hosts=$(get_hosts) + local total=0 + local success=0 + local failed=0 + local failed_hosts="" + + # 确保本地密钥存在 + generate_local_key + echo "" + + for host in $hosts; do + total=$((total + 1)) + if setup_host "$host"; then + success=$((success + 1)) + else + failed=$((failed + 1)) + failed_hosts="$failed_hosts $host" + fi + done + + echo "" + log_info "========== 配置结果 ==========" + log_info "总计: $total 台主机" + log_ok "成功: $success 台" + [ $failed -gt 0 ] && log_fail "失败: $failed 台" + + if [ $failed -gt 0 ]; then + echo "" + log_warn "失败的主机列表:" + for h in $failed_hosts; do + 
log_warn " - $h" + done + echo "" + log_warn "请手动执行: ssh-copy-id ${SSH_USER}@" + fi + + return $failed +} + +# 配置节点间互相免密 (mesh) +setup_mesh() { + log_info "========== 配置节点间互相免密登录 ==========" + local hosts + hosts=$(get_hosts) + local host_array=($hosts) + local total=${#host_array[@]} + local success=0 + local failed=0 + local failed_pairs="" + + # 检查 sshpass + if ! command -v sshpass &>/dev/null; then + log_fail "sshpass 未安装,请先安装: apt install sshpass 或 yum install sshpass" + return 1 + fi + + log_info "共 $total 台主机,需要配置 $((total * (total - 1))) 对免密关系" + echo "" + + for src_host in "${host_array[@]}"; do + log_info "配置 $src_host 到其他节点的免密..." + + # 先在源节点生成密钥(如果不存在) + sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "${SSH_USER}@${src_host}" \ + "[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N '' -q" 2>/dev/null + + # 获取源节点的公钥 + local src_pubkey + src_pubkey=$(sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "${SSH_USER}@${src_host}" "cat ~/.ssh/id_rsa.pub" 2>/dev/null) + + if [ -z "$src_pubkey" ]; then + log_fail "$src_host - 无法获取公钥" + failed=$((failed + 1)) + continue + fi + + for dst_host in "${host_array[@]}"; do + [ "$src_host" = "$dst_host" ] && continue + + # 检查是否已经可以免密 + if sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "${SSH_USER}@${src_host}" \ + "ssh -o BatchMode=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no ${SSH_USER}@${dst_host} exit" 2>/dev/null; then + log_ok " $src_host -> $dst_host (已配置)" + success=$((success + 1)) + else + # 将源节点公钥添加到目标节点 + if sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "${SSH_USER}@${dst_host}" \ + "mkdir -p ~/.ssh && chmod 700 ~/.ssh && echo '$src_pubkey' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys && sort -u ~/.ssh/authorized_keys -o ~/.ssh/authorized_keys" 2>/dev/null; then + log_ok " $src_host -> $dst_host (新配置)" + success=$((success + 1)) + else + log_fail " $src_host -> $dst_host (失败)" + failed=$((failed + 1)) + failed_pairs="$failed_pairs\n 
$src_host -> $dst_host" + fi + fi + done + done + + echo "" + log_info "========== 配置结果 ==========" + log_info "总计: $((total * (total - 1))) 对免密关系" + log_ok "成功: $success 对" + [ $failed -gt 0 ] && log_fail "失败: $failed 对" + + if [ $failed -gt 0 ]; then + echo "" + log_warn "失败的配对:" + echo -e "$failed_pairs" + fi + + return $failed +} + +# 检查节点间互相免密状态 +check_mesh() { + log_info "========== 检查节点间互相免密登录 ==========" + local hosts + hosts=$(get_hosts) + local host_array=($hosts) + local total=${#host_array[@]} + local success=0 + local failed=0 + local failed_pairs="" + + # 检查 sshpass + if ! command -v sshpass &>/dev/null; then + log_fail "sshpass 未安装,请先安装: apt install sshpass 或 yum install sshpass" + return 1 + fi + + log_info "共 $total 台主机,检查 $((total * (total - 1))) 对免密关系" + echo "" + + for src_host in "${host_array[@]}"; do + for dst_host in "${host_array[@]}"; do + [ "$src_host" = "$dst_host" ] && continue + + # 从控制节点 SSH 到源节点,再从源节点 SSH 到目标节点 + if sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no "${SSH_USER}@${src_host}" \ + "ssh -o BatchMode=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no ${SSH_USER}@${dst_host} exit" 2>/dev/null; then + log_ok "$src_host -> $dst_host" + success=$((success + 1)) + else + log_fail "$src_host -> $dst_host" + failed=$((failed + 1)) + failed_pairs="$failed_pairs\n $src_host -> $dst_host" + fi + done + done + + echo "" + log_info "========== 检查结果 ==========" + log_info "总计: $((total * (total - 1))) 对免密关系" + log_ok "成功: $success 对" + [ $failed -gt 0 ] && log_fail "失败: $failed 对" + + if [ $failed -gt 0 ]; then + echo "" + log_warn "失败的配对:" + echo -e "$failed_pairs" + fi + + return $failed +} + +# 显示帮助 +show_help() { + echo "SSH 免密登录批量检查和配置脚本" + echo "" + echo "用法: $0 [inventory_file] [action] [user] [password]" + echo "" + echo "参数:" + echo " inventory_file Ansible inventory 文件路径 (默认: inventory.ini)" + echo " action 操作类型:" + echo " check - 检查控制节点到各主机的免密状态 (默认)" + echo " setup - 配置控制节点到各主机的免密登录" + echo " check-mesh - 
检查所有节点间的互相免密状态" + echo " setup-mesh - 配置所有节点间的互相免密登录" + echo " help - 显示帮助" + echo " user SSH 用户名 (默认: toor)" + echo " password SSH 密码 (默认: root@123)" + echo "" + echo "示例:" + echo " $0 inventory.ini check # 检查控制节点免密状态" + echo " $0 inventory.ini setup # 配置控制节点免密" + echo " $0 inventory.ini check-mesh # 检查节点间互相免密" + echo " $0 inventory.ini setup-mesh # 配置节点间互相免密 (MPI需要)" +} + +# 主函数 +main() { + case "$ACTION" in + check) + batch_check + ;; + setup) + batch_setup + ;; + check-mesh) + check_mesh + ;; + setup-mesh) + setup_mesh + ;; + help|--help|-h) + show_help + ;; + *) + echo "未知操作: $ACTION" + show_help + exit 1 + ;; + esac +} + +main diff --git a/tools/scripts/ansible/templates/ib_test_summary.j2 b/tools/scripts/ansible/templates/ib_test_summary.j2 new file mode 100644 index 0000000000..b1a7f910f9 --- /dev/null +++ b/tools/scripts/ansible/templates/ib_test_summary.j2 @@ -0,0 +1,14 @@ +=== IB Network Test Summary === +Generated: {{ ansible_date_time.iso8601 }} + +Nodes: {{ groups['all'] | length }} +{% for host in groups['all'] %}- {{ host }} ({{ hostvars[host].mgmt_ip | default('N/A') }}) +{% endfor %} + +Tests: +- Ping: {{ 'SKIP' if skip_ping else 'OK' }} +- IB Write: {{ 'SKIP' if skip_ib_write else 'OK' }} +- MPI Single: {{ 'SKIP' if skip_mpi_single else 'OK' }} +- MPI Multi: {{ 'SKIP' if skip_mpi_multi else 'OK' }} + +Results: ./ib_test_results/{{ timestamp }}/ diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml index c87d1c4b90..fbc6332592 100644 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -5,12 +5,9 @@ linters: disable-all: true enable: - misspell - - structcheck - govet - staticcheck - - deadcode - errcheck - - varcheck - unparam - ineffassign - nakedret diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f12626423a..fabe5e43dc 100644 --- 
a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,59 @@ # Changelog +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. +- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + ## 3.2.0 (2022-11-28) ### Added @@ -109,7 +163,7 @@ functions. These are described in the added and changed sections below. 
- #78: Fix unchecked error in example code (thanks @ravron) - #70: Fix the handling of pre-releases and the 0.0.0 release edge case - #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num +- #107: Fix handling prerelease when sorting alphanum and num - #109: Fixed where Validate sometimes returns wrong message on error ## 1.4.2 (2018-04-10) diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index eac19178fb..9ca87a2c79 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -1,7 +1,5 @@ GOPATH=$(shell go env GOPATH) GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz .PHONY: lint lint: $(GOLANGCI_LINT) @@ -19,19 +17,15 @@ test-cover: GO111MODULE=on go test -cover . .PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . $(GOLANGCI_LINT): # Install golangci-lint. 
The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index d8f54dcbd3..2f56c676a5 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -13,23 +13,22 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds [![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - ## Package Versions +Note, import `github.com/Masterminds/semver/v3` to use the latest version. + There are three major versions fo the `semver` package. -* 3.x.x is the new stable and active version. This version is focused on constraint +* 3.x.x is the stable and active version. This version is focused on constraint compatibility for range handling in other tools from other languages. It has a similar API to the v1 releases. The development of this version is on the master branch. The documentation for this version is below. * 2.x was developed primarily for [dep](https://github.com/golang/dep). 
There are no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). ## Parsing Semantic Versions @@ -51,6 +50,18 @@ other versions, convert the version back into a string, and get the original string. Getting the original string is useful if the semantic version was coerced into a valid form. +There are package level variables that affect how `NewVersion` handles parsing. + +- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant + versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch + part. This enables the use of CalVer in versions even when not compliant with SemVer. + When set to `false` less coercion work is done. +- `DetailedNewVersionErrors` provides more detailed errors. It only has an affect when + `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true` + it can provide some more insight into why a version is invalid. Setting + `DetailedNewVersionErrors` to `false` is faster on performance but provides less + detailed error messages if a version fails to parse. + ## Sorting Semantic Versions A set of versions can be sorted using the `sort` package from the standard library. @@ -78,12 +89,12 @@ There are two methods for comparing versions. 
One uses comparison methods on differences to notes between these two methods of comparison. 1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases + and others it will follow the specification and always include pre-releases within the comparison. It will provide an answer that is valid with the comparison section of the spec at https://semver.org/#spec-item-11 2. When constraint checking is used for checks or validation it will follow a different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the + and Rust/Cargo. This includes considering pre-releases to be invalid if the ranges does not include one. If you want to have it include pre-releases a simple solution is to include `-0` in your range. 3. Constraint ranges can have some complex rules including the shorthand use of @@ -111,7 +122,7 @@ v, err := semver.NewVersion("1.3") if err != nil { // Handle version not being parsable. } -// Check if the version meets the constraints. The a variable will be true. +// Check if the version meets the constraints. The variable a will be true. a := c.Check(v) ``` @@ -135,20 +146,20 @@ The basic comparisons are: ### Working With Prerelease Versions Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of prereleases include -development, alpha, beta, and release candidate releases. A prerelease may be +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precedence, prereleases come before their associated releases. In this +order of precedence, pre-releases come before their associated releases. 
In this example `1.2.3-beta.1 < 1.2.3`. -According to the Semantic Version specification prereleases may not be +According to the Semantic Version specification, pre-releases may not be API compliant with their release counterpart. It says, > A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -SemVer comparisons using constraints without a prerelease comparator will skip -prerelease versions. For example, `>=1.2.3` will skip prereleases when looking -at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. +SemVer's comparisons using constraints without a pre-release comparator will skip +pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases. The reason for the `0` as a pre-release version in the example comparison is because pre-releases can only contain ASCII alphanumerics and hyphens (along with @@ -161,6 +172,10 @@ means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case sensitivity doesn't apply here. This is due to ASCII sort ordering which is what the spec specifies. +The `Constraints` instance returned from `semver.NewConstraint()` has a property +`IncludePrerelease` that, when set to true, will return prerelease versions when calls +to `Check()` and `Validate()` are made. + ### Hyphen Range Comparisons There are multiple methods to handle ranges and the first is hyphens ranges. @@ -169,6 +184,9 @@ These look like: * `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` +Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's +parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`. + ### Wildcards In Comparisons The `x`, `X`, and `*` characters can be used as a wildcard character. 
This works @@ -242,3 +260,15 @@ for _, m := range msgs { If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://codeql.github.com) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 0000000000..a30a66b1f7 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go index 203072e464..8b7a10f836 100644 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -12,6 +12,13 @@ import ( // checked against. type Constraints struct { constraints [][]*constraint + containsPre []bool + + // IncludePrerelease specifies if pre-releases should be included in + // the results. 
Note, if a constraint range has a prerelease than + // prereleases will be included for that AND group even if this is + // set to false. + IncludePrerelease bool } // NewConstraint returns a Constraints instance that a Version instance can @@ -22,11 +29,10 @@ func NewConstraint(c string) (*Constraints, error) { c = rewriteRange(c) ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) + lenors := len(ors) + or := make([][]*constraint, lenors) + hasPre := make([]bool, lenors) for k, v := range ors { - - // TODO: Find a way to validate and fetch all the constraints in a simpler form - // Validate the segment if !validConstraintRegex.MatchString(v) { return nil, fmt.Errorf("improper constraint: %s", v) @@ -43,12 +49,22 @@ func NewConstraint(c string) (*Constraints, error) { return nil, err } + // If one of the constraints has a prerelease record this. + // This information is used when checking all in an "and" + // group to ensure they all check for prereleases. + if pc.con.pre != "" { + hasPre[k] = true + } + result[i] = pc } or[k] = result } - o := &Constraints{constraints: or} + o := &Constraints{ + constraints: or, + containsPre: hasPre, + } return o, nil } @@ -57,10 +73,10 @@ func (cs Constraints) Check(v *Version) bool { // TODO(mattfarina): For v4 of this library consolidate the Check and Validate // functions as the underlying functions make that possible now. // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { - if check, _ := c.check(v); !check { + if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check { joy = false break } @@ -83,12 +99,12 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { // Capture the prerelease message only once. 
When it happens the first time // this var is marked var prerelesase bool - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { // Before running the check handle the case there the version is // a prerelease and the check is not searching for prereleases. - if c.con.pre == "" && v.pre != "" { + if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" { if !prerelesase { em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) e = append(e, em) @@ -98,7 +114,7 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { } else { - if _, err := c.check(v); err != nil { + if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil { e = append(e, err) joy = false } @@ -227,8 +243,8 @@ type constraint struct { } // Check if a version meets the constraint -func (c *constraint) check(v *Version) (bool, error) { - return constraintOps[c.origfunc](v, c) +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) } // String prints an individual constraint into a string @@ -236,7 +252,7 @@ func (c *constraint) string() string { return c.origfunc + c.orig } -type cfunc func(v *Version, c *constraint) (bool, error) +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) func parseConstraint(c string) (*constraint, error) { if len(c) > 0 { @@ -272,7 +288,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. - return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs.con = con @@ -290,7 +306,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. 
- return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs := &constraint{ @@ -305,16 +321,14 @@ func parseConstraint(c string) (*constraint, error) { } // Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + if c.dirty { if c.con.Major() != v.Major() { return true, nil } @@ -345,12 +359,11 @@ func constraintNotEqual(v *Version, c *constraint) (bool, error) { return true, nil } -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -391,11 +404,10 @@ func constraintGreaterThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) } -func constraintLessThan(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -406,12 +418,11 @@ func constraintLessThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) } -func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -422,11 +433,10 @@ func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than %s", v, c.orig) } -func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -455,11 +465,10 @@ func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { // ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 // ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 // ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -487,16 +496,15 @@ func constraintTilde(v *Version, c *constraint) (bool, error) { // When there is a .x (dirty) status it automatically opts in to ~. Otherwise // it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } if c.dirty { - return constraintTilde(v, c) + return constraintTilde(v, c, includePre) } eq := v.Equal(c.con) @@ -516,11 +524,10 @@ func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { // ^0.0.3 --> >=0.0.3 <0.0.4 // ^0.0 --> >=0.0.0 <0.1.0 // ^0 --> >=0.0.0 <1.0.0 -func constraintCaret(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -586,7 +593,7 @@ func rewriteRange(i string) string { } o := i for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) o = strings.Replace(o, v[0], t, 1) } diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go deleted file mode 100644 index a242ad7058..0000000000 --- a/vendor/github.com/Masterminds/semver/v3/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - d := string(data) - - // Test NewVersion - _, _ = NewVersion(d) - - // Test StrictNewVersion - _, _ = StrictNewVersion(d) - - // Test NewConstraint - _, _ = NewConstraint(d) - - // The return value should be 0 normally, 1 if the priority in future tests - // should be increased, and -1 if future tests should skip passing in that - // data. We do not have a reason to change priority so 0 is always returned. - // There are example tests that do this. - return 0 -} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index 7c4bed3347..7a3ba73887 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -14,32 +14,52 @@ import ( // The compiled version of the regex created at init() is cached here so it // only needs to be created once. var versionRegex *regexp.Regexp +var looseVersionRegex *regexp.Regexp + +// CoerceNewVersion sets if leading 0's are allowd in the version part. Leading 0's are +// not allowed in a valid semantic version. When set to true, NewVersion will coerce +// leading 0's into a valid version. +var CoerceNewVersion = true + +// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion +// function. 
This is used when CoerceNewVersion is set to false. If set to false +// ErrInvalidSemVer is returned for an invalid version. This does not apply to +// StrictNewVersion. Setting this function to false returns errors more quickly. +var DetailedNewVersionErrors = true var ( // ErrInvalidSemVer is returned a version is found to be invalid when // being parsed. - ErrInvalidSemVer = errors.New("Invalid Semantic Version") + ErrInvalidSemVer = errors.New("invalid semantic version") // ErrEmptyString is returned when an empty string is passed in for parsing. - ErrEmptyString = errors.New("Version string empty") + ErrEmptyString = errors.New("version string empty") // ErrInvalidCharacters is returned when invalid characters are found as // part of a version - ErrInvalidCharacters = errors.New("Invalid characters in version") + ErrInvalidCharacters = errors.New("invalid characters in version") // ErrSegmentStartsZero is returned when a version segment starts with 0. // This is invalid in SemVer. - ErrSegmentStartsZero = errors.New("Version segment starts with 0") + ErrSegmentStartsZero = errors.New("version segment starts with 0") // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") + ErrInvalidMetadata = errors.New("invalid metadata string") // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") + ErrInvalidPrerelease = errors.New("invalid prerelease string") ) // semVerRegex is the regular expression used to parse a semantic version. -const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. 
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + +// looseSemVerRegex is a regular expression that lets invalid semver expressions through +// with enough detail that certain errors can be checked for. +const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` @@ -53,6 +73,7 @@ type Version struct { func init() { versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") } const ( @@ -83,22 +104,23 @@ func StrictNewVersion(v string) (*Version, error) { original: v, } - // check for prerelease or build metadata - var extra []string - if strings.ContainsAny(parts[2], "-+") { - // Start with the build metadata first as it needs to be on the right - extra = strings.SplitN(parts[2], "+", 2) - if len(extra) > 1 { - // build metadata found - sv.metadata = extra[1] - parts[2] = extra[0] + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err } + } - extra = strings.SplitN(parts[2], "-", 2) - if len(extra) > 1 { - // prerelease found - sv.pre = extra[1] - parts[2] = extra[0] + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err } } @@ -114,7 +136,7 @@ func StrictNewVersion(v string) (*Version, error) { } } - // Extract the major, minor, and patch elements onto the returned Version + // Extract major, minor, and patch var err error sv.major, err = strconv.ParseUint(parts[0], 10, 64) 
if err != nil { @@ -131,11 +153,71 @@ func StrictNewVersion(v string) (*Version, error) { return nil, err } - // No prerelease or build metadata found so returning now as a fastpath. - if sv.pre == "" && sv.metadata == "" { - return sv, nil + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } + m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. + m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 } + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
+ if sv.pre != "" { if err = validatePrerelease(sv.pre); err != nil { return nil, err @@ -151,12 +233,8 @@ func StrictNewVersion(v string) (*Version, error) { return sv, nil } -// NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. If the version is SemVer-ish it -// attempts to convert it to SemVer. If you want to validate it was a strict -// semantic version at parse time see StrictNewVersion(). -func NewVersion(v string) (*Version, error) { - m := versionRegex.FindStringSubmatch(v) +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) if m == nil { return nil, ErrInvalidSemVer } @@ -170,13 +248,13 @@ func NewVersion(v string) (*Version, error) { var err error sv.major, err = strconv.ParseUint(m[1], 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } if m[2] != "" { sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.minor = 0 @@ -185,7 +263,7 @@ func NewVersion(v string) (*Version, error) { if m[3] != "" { sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.patch = 0 @@ -381,15 +459,31 @@ func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } +// LessThanEqual tests if one version is less or equal than another one. +func (v *Version) LessThanEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + // GreaterThan tests if one version is greater than another one. 
func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } +// GreaterThanEqual tests if one version is greater or equal than another one. +func (v *Version) GreaterThanEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + // Equal tests if two versions are equal to each other. // Note, versions can be equal with different metadata since metadata // is not considered part of the comparable version. func (v *Version) Equal(o *Version) bool { + if v == o { + return true + } + if v == nil || o == nil { + return false + } return v.Compare(o) == 0 } @@ -612,7 +706,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,9 +727,62 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) } } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = 
strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/Mellanox/rdmamap/.golangci.yml b/vendor/github.com/Mellanox/rdmamap/.golangci.yml new file mode 100644 index 0000000000..09248db5f9 --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/.golangci.yml @@ -0,0 +1,102 @@ +linters-settings: + dupl: + threshold: 150 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + - unnamedResult + gocognit: + min-complexity: 30 + goimports: + local-prefixes: github.com/Mellanox/rdmamap + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + settings: + printf: + funcs: + - (github.com/rs/zerolog/zerolog.Event).Msgf + lll: + line-length: 120 + misspell: + locale: US + prealloc: + # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # True by default. + simple: true + range-loops: true # Report preallocation suggestions on range loops, true by default + for-loops: false # Report preallocation suggestions on for loops, false by default + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. 
+ # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gochecknoinits + - goconst + - gocritic + - gocognit + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + - goconst + - text: "Magic number: 1" + linters: + - gomnd diff --git a/vendor/github.com/Mellanox/rdmamap/.travis.yml b/vendor/github.com/Mellanox/rdmamap/.travis.yml new file mode 100644 index 0000000000..97f41cea81 --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - "1.13" + +before_script: + - go get -u github.com/mattn/goveralls + +script: + - make lint + - make test-coverage + - goveralls -coverprofile=rdmamap.cover -service=travis-ci diff --git a/vendor/github.com/Mellanox/rdmamap/LICENSE b/vendor/github.com/Mellanox/rdmamap/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Mellanox/rdmamap/Makefile b/vendor/github.com/Mellanox/rdmamap/Makefile new file mode 100644 index 0000000000..dd7d7ad3cd --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/Makefile @@ -0,0 +1,80 @@ +# Package related +PACKAGE=rdmamap +ORG_PATH=github.com/Mellanox +REPO_PATH=$(ORG_PATH)/$(PACKAGE) +GOPATH=$(CURDIR)/.gopath +GOBIN =$(CURDIR)/bin +BASE=$(GOPATH)/src/$(REPO_PATH) +GOFILES=$(shell find . -name "*.go" | grep -vE "(\/vendor\/)|(_test.go)") +PKGS=$(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... 
| grep -v "^$(PACKAGE)/vendor/")) +TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS)) + +export GOPATH + +# Go tools +GO = go +GOLANGCI_LINT = $(GOBIN)/golangci-lint +# golangci-lint version should be updated periodically +# we keep it fixed to avoid it from unexpectedly failing on the project +# in case of a version bump +GOLANGCI_LINT_VER = v1.23.8 +TIMEOUT = 15 +Q = $(if $(filter 1,$V),,@) + +.PHONY: all +all: lint test build + +$(GOBIN): + @mkdir -p $@ + +$(BASE): ; $(info setting GOPATH...) + @mkdir -p $(dir $@) + @ln -sf $(CURDIR) $@ + +build: $(GOFILES) + @CGO_ENABLED=0 $(GO) build -v + +# Tools + +$(GOLANGCI_LINT): ; $(info building golangci-lint...) + $Q curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOBIN) $(GOLANGCI_LINT_VER) + +GOVERALLS = $(GOBIN)/goveralls +$(GOBIN)/goveralls: $(BASE) $(GOBIN) ; $(info building goveralls...) + $Q go get github.com/mattn/goveralls + +# Tests + +.PHONY: lint +lint: $(BASE) $(GOLANGCI_LINT) ; $(info running golangci-lint...) @ ## Run golangci-lint + $Q mkdir -p $(BASE)/test + $Q cd $(BASE) && ret=0 && \ + test -z "$$($(GOLANGCI_LINT) run | tee $(BASE)/test/lint.out)" || ret=1 ; \ + cat $(BASE)/test/lint.out ; rm -rf $(BASE)/test ; \ + exit $$ret + + +.PHONY: test tests +test: $(BASE) ; $(info running unit tests...) @ ## Run unit tests + $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) ./... + +tests: test lint ; + +COVERAGE_MODE = count +.PHONY: test-coverage test-coverage-tools +test-coverage-tools: $(GOVERALLS) +test-coverage: COVERAGE_DIR := $(CURDIR)/test +test-coverage: test-coverage-tools $(BASE) ; $(info running coverage tests...) @ ## Run coverage tests + $Q cd $(BASE); $(GO) test -covermode=$(COVERAGE_MODE) -coverprofile=rdmamap.cover ./... + +# Misc + +.PHONY: clean +clean: ; $(info Cleaning...) 
@ ## Cleanup everything + @rm -rf $(GOPATH) + @rm -rf test + +.PHONY: help +help: ## Show this message + @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ + awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/Mellanox/rdmamap/README.md b/vendor/github.com/Mellanox/rdmamap/README.md new file mode 100644 index 0000000000..22aaa5c1ea --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/README.md @@ -0,0 +1,40 @@ +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) +[![Go Report Card](https://goreportcard.com/badge/github.com/Mellanox/rdmamap)](https://goreportcard.com/report/github.com/Mellanox/rdmamap) +[![Build Status](https://travis-ci.com/Mellanox/rdmamap.svg?branch=master)](https://travis-ci.com/Mellanox/rdmamap) +[![Coverage Status](https://coveralls.io/repos/github/Mellanox/rdmamap/badge.svg)](https://coveralls.io/github/Mellanox/rdmamap) + +# rdmamap + +This is golang package that provides map of rdma device with its character and network devices. + +It uses sysfs and netlink interfaces provided by kernel to perform this mapping. 
+ +Local build and test + +You can use go get command: +``` +go get github.com/Mellanox/rdmamap +``` + +Example: + +``` +package main + +import ( + "fmt" + "github.com/Mellanox/rdmamap" +) + +func main() { + rdmaDevices := rdmamap.GetRdmaDeviceList() + fmt.Println("Devices: ", rdmaDevices) + + for _, dev := range rdmaDevices { + charDevices := rdmamap.GetRdmaCharDevices(dev) + fmt.Printf("Rdma device: = %s", dev) + fmt.Println(" Char devices: = ", charDevices) + } +} + +``` diff --git a/vendor/github.com/Mellanox/rdmamap/rdma_map.go b/vendor/github.com/Mellanox/rdmamap/rdma_map.go new file mode 100644 index 0000000000..44e17e63bd --- /dev/null +++ b/vendor/github.com/Mellanox/rdmamap/rdma_map.go @@ -0,0 +1,396 @@ +package rdmamap + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/vishvananda/netlink" +) + +const ( + RdmaClassName = "infiniband" + RdmaClassDir = "/sys/class/infiniband" + RdmaIbUcmDir = "/sys/class/infiniband_cm" + RdmaUcmFilePrefix = "ucm" + + RdmaUmadDir = "/sys/class/infiniband_mad" + RdmaIssmFilePrefix = "issm" + RdmaUmadFilxPrefix = "umad" + + RdmaUverbsDir = "/sys/class/infiniband_verbs" + RdmaUverbsFilxPrefix = "uverbs" + + RdmaGidAttrDir = "gid_attrs" //nolint:stylecheck,golint + RdmaGidAttrNdevDir = "ndevs" //nolint:stylecheck,golint + RdmaPortsdir = "ports" + + RdmaNodeGuidFile = "node_guid" //nolint:stylecheck,golint + RdmaUcmDevice = "/dev/infiniband/rdma_cm" + RdmaDeviceDir = "/dev/infiniband" + + RdmaCountersDir = "counters" + RdmaHwCountersDir = "hw_counters" + + PciDevDir = "/sys/bus/pci/devices" + AuxDevDir = "/sys/bus/auxiliary/devices" + + // For local usage + prevDir = ".." 
+ nibbleBitSize = 4 + loopBackIfName = "lo" +) + +// Returns a list of rdma device names +//nolint:prealloc +func GetRdmaDeviceList() []string { + var rdmaDevices []string + fd, err := os.Open(RdmaClassDir) + if err != nil { + return nil + } + defer fd.Close() + + fileInfos, err := fd.Readdir(-1) + if err != nil { + return nil + } + + for i := range fileInfos { + if fileInfos[i].IsDir() { + continue + } + rdmaDevices = append(rdmaDevices, fileInfos[i].Name()) + } + return rdmaDevices +} + +func isDirForRdmaDevice(rdmaDeviceName, dirName string) bool { + fileName := filepath.Join(dirName, "ibdev") + + fd, err := os.OpenFile(fileName, os.O_RDONLY, 0444) + if err != nil { + return false + } + defer fd.Close() + + if _, err = fd.Seek(0, io.SeekStart); err != nil { + return false + } + + data, err := ioutil.ReadAll(fd) + if err != nil { + return false + } + return (strings.Compare(strings.Trim(string(data), "\n"), rdmaDeviceName) == 0) +} + +func getCharDevice(rdmaDeviceName, classDir, charDevPrefix string) (string, error) { + fd, err := os.Open(classDir) + if err != nil { + return "", err + } + defer fd.Close() + fileInfos, err := fd.Readdir(-1) + if err != nil { + return "", nil + } + + for i := range fileInfos { + if fileInfos[i].Name() == "." 
|| fileInfos[i].Name() == prevDir { + continue + } + if !strings.Contains(fileInfos[i].Name(), charDevPrefix) { + continue + } + dirName := filepath.Join(classDir, fileInfos[i].Name()) + if !isDirForRdmaDevice(rdmaDeviceName, dirName) { + continue + } + deviceFile := filepath.Join("/dev/infiniband", fileInfos[i].Name()) + return deviceFile, nil + } + return "", fmt.Errorf("no ucm device found") +} + +func getUcmDevice(rdmaDeviceName string) (string, error) { + return getCharDevice(rdmaDeviceName, + RdmaIbUcmDir, + RdmaUcmFilePrefix) +} + +func getIssmDevice(rdmaDeviceName string) (string, error) { + return getCharDevice(rdmaDeviceName, + RdmaUmadDir, + RdmaIssmFilePrefix) +} + +func getUmadDevice(rdmaDeviceName string) (string, error) { + return getCharDevice(rdmaDeviceName, + RdmaUmadDir, + RdmaUmadFilxPrefix) +} + +func getUverbDevice(rdmaDeviceName string) (string, error) { + return getCharDevice(rdmaDeviceName, + RdmaUverbsDir, + RdmaUverbsFilxPrefix) +} + +func getRdmaUcmDevice() (string, error) { + info, err := os.Stat(RdmaUcmDevice) + if err != nil { + return "", err + } + if info.Name() == "rdma_cm" { + return RdmaUcmDevice, nil + } + + return "", fmt.Errorf("invalid file name rdma_cm") +} + +// Returns a list of character device absolute path for a requested +// rdmaDeviceName. +// Returns nil if no character devices are found. 
+func GetRdmaCharDevices(rdmaDeviceName string) []string { + var rdmaCharDevices []string + + ucm, err := getUcmDevice(rdmaDeviceName) + if err == nil { + rdmaCharDevices = append(rdmaCharDevices, ucm) + } + issm, err := getIssmDevice(rdmaDeviceName) + if err == nil { + rdmaCharDevices = append(rdmaCharDevices, issm) + } + umad, err := getUmadDevice(rdmaDeviceName) + if err == nil { + rdmaCharDevices = append(rdmaCharDevices, umad) + } + uverb, err := getUverbDevice(rdmaDeviceName) + if err == nil { + rdmaCharDevices = append(rdmaCharDevices, uverb) + } + rdmaCm, err := getRdmaUcmDevice() + if err == nil { + rdmaCharDevices = append(rdmaCharDevices, rdmaCm) + } + + return rdmaCharDevices +} + +// Gets a list of ports for a specified device +//nolint:prealloc +func GetPorts(rdmaDeviceName string) []string { + var ports []string + + portsDir := filepath.Join(RdmaClassDir, rdmaDeviceName, RdmaPortsdir) + fd, err := os.Open(portsDir) + if err != nil { + return nil + } + defer fd.Close() + + fileInfos, err := fd.Readdir(-1) + if err != nil { + return nil + } + + for i := range fileInfos { + if fileInfos[i].Name() == "." || fileInfos[i].Name() == prevDir { + continue + } + ports = append(ports, fileInfos[i].Name()) + } + return ports +} + +//nolint:prealloc +func getNetdeviceIds(rdmaDeviceName, port string) []string { + var indices []string + + dir := filepath.Join(RdmaClassDir, rdmaDeviceName, RdmaPortsdir, port, + RdmaGidAttrDir, RdmaGidAttrNdevDir) + + fd, err := os.Open(dir) + if err != nil { + return nil + } + defer fd.Close() + + fileInfos, err := fd.Readdir(-1) + if err != nil { + return nil + } + + for i := range fileInfos { + if fileInfos[i].Name() == "." 
|| fileInfos[i].Name() == prevDir { + continue + } + indices = append(indices, fileInfos[i].Name()) + } + return indices +} + +func isNetdevForRdma(rdmaDeviceName, port, index, netdevName string) bool { + fileName := filepath.Join(RdmaClassDir, rdmaDeviceName, RdmaPortsdir, port, + RdmaGidAttrDir, RdmaGidAttrNdevDir, index) + + fd, err := os.OpenFile(fileName, os.O_RDONLY, 0444) + if err != nil { + return false + } + defer fd.Close() + + if _, err = fd.Seek(0, io.SeekStart); err != nil { + return false + } + + data, err := ioutil.ReadAll(fd) + if err != nil { + return false + } + return (strings.TrimSuffix(string(data), "\n") == netdevName) +} + +func getRdmaDeviceForEth(netdevName string) (string, error) { + // Iterate over the list of rdma devices, + // read the gid table attribute netdev + // if the netdev matches, found the matching rdma device + + devices := GetRdmaDeviceList() + for _, dev := range devices { + ports := GetPorts(dev) + for _, port := range ports { + indices := getNetdeviceIds(dev, port) + for _, index := range indices { + found := isNetdevForRdma(dev, port, index, netdevName) + if found { + return dev, nil + } + } + } + } + return "", fmt.Errorf("rdma device not found for netdev %v", netdevName) +} + +func getNodeGUID(rdmaDeviceName string) ([]byte, error) { + var nodeGUID []byte + + fileName := filepath.Join(RdmaClassDir, rdmaDeviceName, RdmaNodeGuidFile) + + fd, err := os.OpenFile(fileName, os.O_RDONLY, 0444) + if err != nil { + return nil, err + } + defer fd.Close() + + if _, err = fd.Seek(0, io.SeekStart); err != nil { + return nil, err + } + data, err := ioutil.ReadAll(fd) + if err != nil { + return nil, err + } + data = data[:len(data)-1] + var j int + for _, b := range data { + if b == ':' { + continue + } + c, err := strconv.ParseUint(string(b), 16, 8) + if err != nil { + return nil, err + } + if (j % 2) == 0 { + nodeGUID = append(nodeGUID, byte(c)<//hw_counters */ + Stats []RdmaStatEntry /* /sys/class/infiniband///counters */ + Port 
int +} + +type RdmaStats struct { + PortStats []RdmaPortStats +} + +func readCounter(name string) uint64 { + fd, err := os.OpenFile(name, os.O_RDONLY, 0444) + if err != nil { + return 0 + } + defer fd.Close() + + if _, err = fd.Seek(0, io.SeekStart); err != nil { + return 0 + } + + data, err := ioutil.ReadAll(fd) + if err != nil { + return 0 + } + dataStr := string(data) + dataStr = strings.Trim(dataStr, "\n") + value, _ := strconv.ParseUint(dataStr, 10, 64) + return value +} + +//nolint:prealloc +func getCountersFromDir(path string) ([]RdmaStatEntry, error) { + var stats []RdmaStatEntry + + fd, err := os.Open(path) + if err != nil { + return stats, err + } + defer fd.Close() + + fileInfos, err := fd.Readdir(-1) + if err != nil { + return stats, err + } + + for _, file := range fileInfos { + if file.IsDir() { + continue + } + value := readCounter(filepath.Join(path, file.Name())) + entry := RdmaStatEntry{file.Name(), value} + stats = append(stats, entry) + } + return stats, nil +} + +// Get RDMA Sysfs stats from counters directory of a port of a rdma device +// Port number starts from 1. +func GetRdmaSysfsStats(rdmaDevice string, port int) ([]RdmaStatEntry, error) { + path := filepath.Join(RdmaClassDir, rdmaDevice, + RdmaPortsdir, strconv.Itoa(port), RdmaCountersDir) + + rdmastats, err := getCountersFromDir(path) + return rdmastats, err +} + +// Get RDMA Sysfs stats from hw_counters directory of a port of a rdma device +// Port number starts from 1. +func GetRdmaSysfsHwStats(rdmaDevice string, port int) ([]RdmaStatEntry, error) { + path := filepath.Join(RdmaClassDir, rdmaDevice, + RdmaPortsdir, strconv.Itoa(port), RdmaHwCountersDir) + + rdmastats, err := getCountersFromDir(path) + return rdmastats, err +} + +// Get RDMA sysfs starts from counter and hw_counters directory for a requested +// port of a device. 
+func GetRdmaSysfsAllStats(rdmaDevice string, port int) (RdmaPortStats, error) { + var portstats RdmaPortStats + + hwstats, err := GetRdmaSysfsHwStats(rdmaDevice, port) + if err != nil { + return portstats, nil + } + portstats.HwStats = hwstats + + stats, err := GetRdmaSysfsStats(rdmaDevice, port) + if err != nil { + return portstats, nil + } + portstats.Stats = stats + portstats.Port = port + return portstats, nil +} + +// Get RDMA sysfs starts from counter and hw_counters directory for a +// rdma device. +func GetRdmaSysfsAllPortsStats(rdmaDevice string) (RdmaStats, error) { + var allstats RdmaStats + + path := filepath.Join(RdmaClassDir, rdmaDevice, RdmaPortsdir) + fd, err := os.Open(path) + if err != nil { + return allstats, err + } + defer fd.Close() + + fileInfos, err := fd.Readdir(-1) + if err != nil { + return allstats, err + } + + for i, file := range fileInfos { + if fileInfos[i].Name() == "." || fileInfos[i].Name() == ".." { + continue + } + if !file.IsDir() { + continue + } + port, _ := strconv.Atoi(file.Name()) + portstats, err := GetRdmaSysfsAllStats(rdmaDevice, port) + if err != nil { + return allstats, err + } + allstats.PortStats = append(allstats.PortStats, portstats) + } + return allstats, nil +} + +func printRdmaStats(device string, stats *RdmaStats) { + for _, portstats := range stats.PortStats { + fmt.Printf("device: %s, port: %d\n", device, portstats.Port) + fmt.Println("Hw stats:") + for _, entry := range portstats.HwStats { + fmt.Printf("%s: %d\n", entry.Name, entry.Value) + } + fmt.Println("Stats:") + for _, entry := range portstats.Stats { + fmt.Printf("%s: %d\n", entry.Name, entry.Value) + } + } +} + +// Get RDMA statistics of a docker container. +// containerID is prefixed matched against the running docker containers, +// so a non ambiguous short identifier can be supplied as well. 
+func GetDockerContainerRdmaStats(containerID string) { + // Lock the OS Thread so we don't accidentally switch namespaces + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + originalHandle, err := netns.Get() + if err != nil { + log.Println("Fail to get handle of current net ns", err) + return + } + + nsHandle, err := netns.GetFromDocker(containerID) + if err != nil { + log.Println("Invalid docker id: ", containerID) + return + } + if netns.Set(nsHandle) != nil { + return + } + + ifaces, err := net.Interfaces() + if err != nil { + _ = netns.Set(originalHandle) + return + } + log.Printf("Net Interfaces: %v\n", ifaces) + for _, iface := range ifaces { + if iface.Name == loopBackIfName { + continue + } + rdmadev, err := GetRdmaDeviceForNetdevice(iface.Name) + if err != nil { + continue + } + rdmastats, err := GetRdmaSysfsAllPortsStats(rdmadev) + if err != nil { + log.Println("Fail to query device stats: ", err) + continue + } + printRdmaStats(rdmadev, &rdmastats) + } + _ = netns.Set(originalHandle) +} diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 743732d3fe..d8aafb31e1 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -20,7 +20,7 @@ Alban Crequy alban@kinvolk.io Aleksander Mistewicz amistewicz@google.com Aleksei Zakharov zakharov.a.g@yandex.ru Alexander Alemayhu alexander@alemayhu.com -Alexander Berger alex-berger@users.noreply.github.com +Alexander Berger alex-berger@gmx.ch Alexander Block ablock84@gmail.com Alexandre Perrin alex@isovalent.com Alexei Starovoitov alexei.starovoitov@gmail.com @@ -28,24 +28,31 @@ Alexey Grevtsev alexey.grevtcev@gmail.com Alex Katsman alexkats@google.com Alex Romanov alex@romanov.ws Alex Szakaly alex.szakaly@gmail.com -Alok Kumar Singh alokaks601@gmail.com +Alex Waring alex.waring@starlingbank.com Amey Bhide amey@covalent.io +Amir Kheirkhahan amir.kheirkhahan@dbschenker.com amitmavgupta 
115551423+amitmavgupta@users.noreply.github.com Amol Ambekar ambekara@google.com Amre Shakimov amre@covalent.io Anderson, David L david.l.anderson@intel.com Andor Nemeth andor_nemeth@swissre.com +Andreas Mårtensson andreas@addem.se Andree Klattenhoff mail@andr.ee +Andrei Kvapil kvapss@gmail.com André Martins andre@cilium.io Andrew Bulford andrew.bulford@form3.tech Andrew Holt andrew.holt@utmost.co Andrew Sauber 2046750+asauber@users.noreply.github.com Andrew Sy Kim kim.andrewsy@gmail.com +Andrew Titmuss iandrewt@icloud.com Andrey Devyatkin andrey.devyatkin@fivexl.io Andrey Klimentyev andrey.klimentyev@flant.com Andrey Voronkov voronkovaa@gmail.com +Andrii Iuspin 57713382+ayuspin@users.noreply.github.com Andrzej Mamak nqaegg@gmail.com Andy Allred andy@punasusi.com +andychuang andy.chuang@shoplineapp.com +Animesh Pathak 53110238+Sonichigo@users.noreply.github.com Aniruddha Amit Dutta duttaaniruddha31@gmail.com Anish Shah anishshah@google.com Anit Gandhi anitgandhi@gmail.com @@ -55,31 +62,40 @@ Anthony Rabbito hello@anthonyrabbito.com Antoine Coetsier acr@exoscale.ch Antoine Legrand 2t.antoine@gmail.com Antonio Ojea aojea@google.com +Anton Ippolitov anton.ippolitov@datadoghq.com +Antoni Zawodny zawodny@google.com Anton Protopopov aspsk@isovalent.com Anton Tykhyy atykhyy@gmail.com +Antony Reynaud antony.reynaud@isovalent.com Anurag Aggarwal anurag.aggarwal@flipkart.com Archana Shinde archana.m.shinde@intel.com +Ardika Bagus me@ardikabs.com Arika Chen eaglesora@gmail.com Arnaud Meukam ameukam@gmail.com +Arseniy Belorukov a.belorukov@team.bumble.com Arthur Chiao arthurchiao@hotmail.com ArthurChiao arthurchiao@hotmail.com Arthur Evstifeev mail@ap4y.me Arthur Outhenin-Chalandre arthur@cri.epita.fr Arvind Soni arvind@covalent.io +Ashley Reese ashley.reese@firma.seznam.cz Ashray Jain ashrayj@palantir.com Ashwin Paranjpe ashwin@covalent.io Assiya Khuzyakhmetova assiya.khuzyakhmetova@nu.edu.kz Atkins Chang atkinschang@gmail.com Augustas Berneckas a.berneckas@gmail.com Austin 
Cawley-Edwards austin.cawley@gmail.com +AwesomePatrol AwesomePatrol@users.noreply.github.com ayesha khaliq ayeshakhaliqrana@gmail.com Ayush Dwivedi ayush.dwivedi@accuknox.com +bakito github@bakito.ch Barun Acharya barun1024@gmail.com Basit Mustafa basit.mustafa@gmail.com Beatriz Martínez beatriz@isovalent.com Benjamin Leggett benjamin.leggett@solo.io Benjamin Pineau benjamin.pineau@datadoghq.com Benoît Sauvère benoit.sauvere@backmarket.com +Bernard Halas bernard.halas@berops.com Bill Mulligan billmulligan516@gmail.com Bingshen Wang bingshen.wbs@alibaba-inc.com Bingwu Yang detailyang@gmail.com @@ -94,6 +110,7 @@ Brandon McNama brandonmcnama@outlook.com Brian Topping brian@coglative.com Bruno Miguel Custódio brunomcustodio@gmail.com Bryan Stenson bryan.stenson@okta.com +bzsuni bingzhe.sun@daocloud.io Calum MacRae hi@cmacr.ae Camilo Schoeningh camilo.schoeningh@dunnhumby.com Canh Ngo canhnt@gmail.com @@ -106,26 +123,35 @@ Chance Zibolski chance.zibolski@gmail.com Changyu Wang changyuwang@tencent.com Charles-Edouard Brétéché charled.breteche@gmail.com Charles-Henri Guérin charles-henri.guerin@zenika.com +chaunceyjiang chaunceyjiang@gmail.com Chen Kang kongchen28@gmail.com +chentanjun tanjunchen20@gmail.com chenyahui chenyahui9@jd.com Chen Yaqi chenyaqi01@baidu.com chenyuezhou zcy.chenyue.zhou@gmail.com +chez-shanpu tomoki-sugiura@cybozu.co.jp +Chris Bannister c.bannister@gmail.com Chris Tarazi chris@isovalent.com Christian Hörtnagl christian2@univie.ac.at Christian Hüning christian.huening@finleap.com Christine Chen christine.chen@datadoghq.com +Christine Kim xtineskim@gmail.com Christopher Biscardi chris@christopherbiscardi.com Christopher Schmidt fakod666@gmail.com Chris Werner Rau cwrau@cwrau.info ChrsMark chrismarkou92@gmail.com Cilium Imagebot noreply@cilium.io +Cilium Release Bot noreply@cilium.io Cintia Sanchez Garcia cynthiasg@icloud.com +CJ Virtucio cjv287@gmail.com Claudia J. 
Kang claudiajkang@gmail.com Clément Delzotti elk1ns@outlook.fr cleverhu shouping.hu@daocloud.io cndoit18 cndoit18@outlook.com Connor Jones cj@cjmakes.com Cookie Wang luckymrwang@163.com +cornfeedhobo cornfeedhobo@fuzzlabs.org +Cory Snyder csnyder@1111systems.com Craig Box craig.box@gmail.com cui fliter imcusg@gmail.com Cynthia Thomas cynthia@covalent.io @@ -134,6 +160,7 @@ Cyril Scetbon cscetbon@gmail.com czybjtu smartczy@outlook.com Dale Ragan dale.ragan@sap.com Dalton Hubble dghubble@gmail.com +Dan Everton deverton@godaddy.com Daneyon Hansen daneyon.hansen@solo.io Đặng Minh Dũng dungdm93@live.com Daniel Borkmann daniel@iogearbox.net @@ -146,22 +173,28 @@ Danni Skov Høglund skuffe@pwnz.dk Dan Sexton dan.b.sexton@gmail.com Dan Wendlandt dan@covalent.io Dario Mader maderdario@gmail.com -darox maderdario@gmail.com Darren Foo darren.foo@shopify.com Darren Mackintosh unixdaddy@gmail.com Darshan Chaudhary deathbullet@gmail.com +DaShaun 826271+dashaun@users.noreply.github.com David Bimmler david.bimmler@isovalent.com David Birks davidebirks@gmail.com +David Boslee david@goteleport.com David Bouchare david.bouchare@datadoghq.com David Calvert david@0xdc.me David Chen davidchen94@outlook.com David Donchez donch@dailymotion.com David Korczynski david@adalogics.com +David Leadbeater dgl@dgl.cx David Schlosnagle davids@palantir.com +David Swafford dswafford@coreweave.com David Wolffberg 1350533+wolffberg@users.noreply.github.com Dawn lx1960753013@gmail.com +dddddai dddwq@foxmail.com +Dean 22192242+saintdle@users.noreply.github.com Deepesha Burse deepesha.3007@gmail.com Deepesh Pathak deepshpathak@gmail.com +Denis GERMAIN dgermain@deezer.com Denis Khachyan khachyanda.gmail.com Derek Gaffney 17263955+gaffneyd4@users.noreply.github.com Deshi Xiao xiaods@gmail.com @@ -170,6 +203,7 @@ Dharma Bellamkonda dharma.bellamkonda@gmail.com Didier Durand durand.didier@gmail.com Diego Casati diego.casati@gmail.com Dima Pugachev krabradosty@gmail.com +Dipankar Das dipankardas0115@gmail.com 
Divine Odazie dodazie@gmail.com Divya Mohan divya.mohan0209@gmail.com Divyansh Kamboj divyansh.kamboj@accuknox.com @@ -183,13 +217,17 @@ Dmitry Shurupov dmitry.shurupov@palark.com Dom Del Nano ddelnano@gmail.com Dom Goodwin dom.goodwin@capgemini.com Donia Chaiehloudj donia.cld@isovalent.com +Donnie McMahan jmcmaha1@gmail.com Dorde Lapcevic dordel@google.com Duffie Cooley dcooley@isovalent.com +dwalker-sabiogroup 100362969+dwalker-sabiogroup@users.noreply.github.com Dylan Reimerink dylan.reimerink@isovalent.com Ekene Nwobodo nwobodoe71@gmail.com +Electron alokaks601@gmail.com El-Fadel Bonfoh elfadel@accuknox.com +eliranw 39266788+eliranw@users.noreply.github.com Ellie Springsteen ellie.springsteen@appian.com -Eloy Coto eloy.coto@gmail.com +Eloy Coto eloy.coto@acalustra.com Emin Aktas eminaktas34@gmail.com Emmanuel T Odeke emmanuel@orijtech.com Emre Savcı emre.savci@trendyol.com @@ -203,6 +241,7 @@ Eric Ripa eric@ripa.io Erik Chang erik.chang@nordstrom.com Eugene Starchenko 17835122+eugenestarchenko@users.noreply.github.com Ewout Prangsma ewout@prangsma.net +Fabian Fischer fabian.fischer@isovalent.com Fabio Falzoi fabio.falzoi@isovalent.com Faiyaz Ahmed faiyaza@gmail.com Fankaixi Li fankaixi.li@bytedance.com @@ -212,9 +251,11 @@ fengshunli 1171313930@qq.com Fernand Galiana fernand.galiana@gmail.com Feroz Salam feroz.salam@isovalent.com FeynmanZhou pengfeizhou@yunify.com +Filip Nikolic oss.filipn@gmail.com Fish-pro zechun.chen@daocloud.io Florian Koch f0@users.noreply.github.com Florian Lehner dev@der-flo.net +Foyer Unix foyerunix@foyer.lu Francois Allard francois@breathelife.com François Joulaud francois.joulaud@radiofrance.com Frank Villaro-Dixon frank.villaro@infomaniak.com @@ -222,6 +263,7 @@ Frederic Branczyk fbranczyk@gmail.com Fred Hsu fredlhsu@gmail.com Fredrik Lönnegren fredrik.lonnegren@gmail.com Fulvio Risso fulvio.risso@polito.it +gailsuccess 157372272+gailsuccess@users.noreply.github.com Gaurav Genani h3llix.pvt@gmail.com Gaurav Yadav 
gaurav.dev.iiitm@gmail.com Gavin McNair gavin.mcnair@kaluza.com @@ -232,13 +274,16 @@ GH action ghabot@does.not.exist.cilium.org Gianluca Arbezzano gianarb92@gmail.com Gilberto Bertin jibi@cilium.io gjmzj jmgaozz@hotmail.com +Glen Yu glen.yu@gmail.com Glib Smaga code@gsmaga.com Gobinath Krishnamoorthy gobinathk@google.com Gowtham Sundara gowtham.sundara@rapyuta-robotics.com +gray greyschwinger@gmail.com Gray Lian gray.liang@isovalent.com Guilherme Oki guilherme.oki@wildlifestudios.com Guilherme Souza 101073+guilhermef@users.noreply.github.com Gunju Kim gjkim042@gmail.com +guoguangwu guoguangwu@magic-shield.com Haitao Li lihaitao@gmail.com Haiyue Wang haiyue.wang@intel.com Hang Yan hang.yan@hotmail.com @@ -252,14 +297,20 @@ Heiko Rothe me@heikorothe.com Hemanth Malla hemanth.malla@datadoghq.com Hemslo Wang hemslo.wang@gmail.com Hrittik hrittikcom@gmail.com +Huagong Wang wanghuagong@kylinos.cn huangxuesen huangxuesen@kuaishou.com Hui Kong hui.kong@qunar.com Hunter Massey hmassey@tradestation.com +Husni Alhamdani dhanielluis@gmail.com +Huweicai i@huweicai.com hxysayhi 51870525+hxysayhi@users.noreply.github.com Ian Vernon ian@cilium.io Ifeanyi Ubah ify1992@yahoo.com +Iiqbal2000 iqbalhafizh2000@gmail.com Ilya Dmitrichenko errordeveloper@gmail.com Ilya Shaisultanov ilya.shaisultanov@gmail.com +Ioannis Androulidakis androulidakis.ioannis@gmail.com +ishuar ishansharma887@gmail.com Ivan Makarychev i.makarychev@tinkoff.ru Ivar Lazzaro ivarlazzaro@gmail.com Jack-R-lantern tjdfkr2421@gmail.com @@ -270,17 +321,22 @@ James Brookes jbrookes@confluent.io James Laverack james@isovalent.com James McShane james.mcshane@superorbital.io Jan-Erik Rediger janerik@fnordig.de +Jan Jansen jan.jansen@gdata.de Jan Mraz strudelpi@pm.me Jarno Rajahalme jarno@isovalent.com +Jason Aliyetti jaliyetti@gmail.com +JBodkin-Amphora james.bodkin@amphora.net Jean Raby jean@raby.sh Jed Salazar jedsalazar@gmail.com Jef Spaleta jspaleta@gmail.com Jerry J. 
Muzsik jerrymuzsik@icloud.com +Jesse Haka haka.jesse@gmail.com Jess Frazelle acidburn@microsoft.com Jiang Wang jiang.wang@bytedance.com Jianlin Lv Jianlin.Lv@arm.com Jian Zeng anonymousknight96@gmail.com JieJhih Jhang jiejhihjhang@gmail.com +jignyasamishra iamjignyasa@gmail.com Jim Angel jimangel@google.com.com Jim Ntosas ntosas@gmail.com JinLin Fu withlin@apache.org @@ -296,16 +352,20 @@ Joey Espinosa jlouis.espinosa@gmail.com Johannes Liebermann johanan.liebermann@gmail.com John Fastabend john.fastabend@gmail.com John Gardiner Myers jgmyers@proofpoint.com +John Howard howardjohn@google.com John Watson johnw@planetscale.com John Zheng johnzhengaz@gmail.com Jomen Xiao jomenxiao@gmail.com Jonathan Davies jpds@protonmail.com +Jonathan Grahl jonathan@keyholders.io Jones Shi shilei@hotstone.com.cn +Jooho Lee jhlee@si-analytics.ai Jorik Jonker jorik.jonker@eu.equinix.com Joseph-Irving joseph.irving500@gmail.com Joseph Sheng jiajun.sheng@microfocus.com Joseph Stevens thejosephstevens@gmail.com Joshua Roppo joshroppo@gmail.com +jshr-w shjayaraman@microsoft.com Juan Jimenez-Anca cortopy@users.noreply.github.com Juha Tiensyrjä juha.tiensyrja@ouraring.com Julian Wiedmann jwi@isovalent.com @@ -316,11 +376,13 @@ Junli Ou oujunli306@gmail.com Jussi Maki jussi@isovalent.com kahirokunn okinakahiro@gmail.com Kaito Ii kaitoii1111@gmail.com +Kaloyan Yordanov Kaloyan.Yordanov@starlizard.com Kamil Lach kamil.lach.rs@gmail.com Karim Naufal rimkashox@gmail.com Karl Heins karlheins@northwesternmutual.com Karsten Nielsen karsten.nielsen@ingka.ikea.com Katarzyna Borkmann kasia@iogearbox.net +Katie Struthers 99215338+katiestruthers@users.noreply.github.com Kazuki Suda kazuki.suda@gmail.com Keisuke Kondo k.gryphus@gmail.com Kenshin Chen smwyzi@qq.com @@ -332,12 +394,16 @@ Kir Kolyshkin kolyshkin@gmail.com Koichiro Den den@klaipeden.com Konstantin Aksenov konstantin.aksenov@flant.com Kornilios Kourtis kornilios@isovalent.com +kwakubiney kebiney@hotmail.com Laurent Bernaille 
laurent.bernaille@datadoghq.com +Lawrence Gadban lawrence.gadban@solo.io +ldelossa louis.delos@gmail.com Lehner Florian dev@der-flo.net Leonard Cohnen lc@edgeless.systems leonliao xiaobo.liao@gmail.com Liang Zhou zhoul110@chinatelecom.cn Li Chengyuan chengyuanli@hotmail.com +Li Chun lichun823@gmail.com LiHui andrewli@yunify.com Lin Dong lindongld@google.com Lin Sun lin.sun@solo.io @@ -346,13 +412,19 @@ Liu Qun qunliu@zyhx-group.com liuxu liuxu623@gmail.com Livingstone S E livingstone.s.e@gmail.com Li Yiheng lyhutopi@gmail.com +Liyi Huang pdshly@gmail.com Liz Rice liz@lizrice.com +log1cb0mb nabeelnrana@gmail.com LongHui Li longhui.li@woqutech.com +loomkoom 29258685+loomkoom@users.noreply.github.com Lorenz Bauer lmb@isovalent.com Lorenzo Fundaró lorenzofundaro@gmail.com Louis DeLosSantos louis@isovalent.com lou-lan loulan@loulan.me +Lucas Leblow lucasleblow@mailbox.org lucming 2876757716@qq.com +Ludovic Ortega ludovic.ortega@adminafk.fr +Lukas Stehlik stehlik.lukas@gmail.com Maartje Eyskens maartje@eyskens.me Maciej Fijalkowski maciej.fijalkowski@intel.com Maciej Kwiek maciej@isovalent.com @@ -361,24 +433,29 @@ Madhu Challa madhu@cilium.io Madhusudan.C.S madhusudancs@gmail.com Mahadev Panchal mahadev.panchal@benisontech.com MaiReo sawako.saki@gmail.com +Mais mai.saleh@siemens.com Maksym Lushpenko iviakciivi@gmail.com Manali Bhutiyani manali@covalent.io Mandar U Jog mjog@google.com Manuel Buil mbuil@suse.com +Manuel Rüger manuel@rueg.eu Manuel Stößel manuel.stoessel@t-systems.com Marcel Zieba marcel.zieba@isovalent.com Marcin Skarbek git@skarbek.name Marcin Swiderski forgems@gmail.com +Marco Aurelio Caldas Miranda 17923899+macmiranda@users.noreply.github.com Marco Hofstetter marco.hofstetter@isovalent.com Marco Iorio marco.iorio@isovalent.com Marco Kilchhofer mkilchhofer@users.noreply.github.com Marc Stulz m@footek.ch Marek Chodor mchodor@google.com Marga Manterola marga@isovalent.com +Marino Wijay 45947861+distributethe6ix@users.noreply.github.com Mario Constanti 
mario@constanti.de Marius Gerling marius.gerling@uniberg.com Mark deVilliers markdevilliers@gmail.com Mark Pashmfouroush mark@isovalent.com +Mark St John markstjohn@google.com Markus Blaschke mblaschke82@gmail.com Martin Charles martincharles07@gmail.com Martin Koppehel martin.koppehel@st.ovgu.de @@ -390,6 +467,7 @@ Matej Gera matejgera@gmail.com Mathias Herzog mathu@gmx.ch Mathieu Parent math.parent@gmail.com Mathieu Tortuyaux mtortuyaux@microsoft.com +Mathis Joffre 51022808+Joffref@users.noreply.github.com Matt Anderson matanderson@equinix.com Matthew Fenwick mfenwick100@gmail.com Matthew Gumport me@gum.pt @@ -409,14 +487,19 @@ Michael Fischer fiscmi@amazon.com Michael Fornaro 20387402+xUnholy@users.noreply.github.com Michael Francis michael@melenion.com Michael Kashin mmkashin@gmail.com +Michael Mykhaylov 32168861+mikemykhaylov@users.noreply.github.com Michael Petrov michael@openai.com Michael Ryan Dempsey bluestealth@bluestealth.pw +michaelsaah michael.saah@segment.com +Michael Saah msaah@twilio.com Michael Schubert michael@kinvolk.io Michael Vorburger vorburger@redhat.com Michal Rostecki vadorovsky@gmail.com +Michal Siwinski siwy@google.com Michi Mutsuzaki michi@isovalent.com Mike Fedosin mfedosin@gmail.com MikeLing sabergeass@gmail.com +Mike Mwanje mwanjemike767@gmail.com Mitch Hulscher mitch.hulscher@lib.io Moh Ahmed moh.ahmed@cengn.ca Mohammad Yosefpor 47300215+m-yosefpor@users.noreply.github.com @@ -424,6 +507,8 @@ Mohit Marathe mohitmarathe23@gmail.com Moritz Eckert m1gh7ym0@gmail.com Moritz Johner beller.moritz@googlemail.com Moshe Immerman moshe.immerman@vitalitygroup.com +mrproliu 741550557@qq.com +Natalia Reka Ivanko natalia@isovalent.com Nate Sweet nathanjsweet@pm.me Nate Taylor ntaylor1781@gmail.com Nathan Bird njbird@infiniteenergy.com @@ -435,8 +520,9 @@ necatican necaticanyildirim@gmail.com Neela Jacques neela@isovalent.com Neil Seward neil.seward@elasticpath.com Neil Wilson neil@aldur.co.uk +Neutrollized glen.yu@gmail.com Nick M 
4718+rkage@users.noreply.github.com -Nick Young nick@isovalent.com +Nick Young ynick@cisco.com Niclas Mietz solidnerd@users.noreply.github.com Nico Berlee nico.berlee@on2it.net Nicolas Busseneau nicolas@isovalent.com @@ -448,8 +534,11 @@ Nikolay Nikolaev nicknickolaev@gmail.com Nirmoy Das ndas@suse.de Nishant Burte nburte@google.com Nitish Malhotra nitishm@microsoft.com +Nitish Tiwari nitish@parseable.io Noel Georgi git@frezbo.dev nrnrk noriki6t@gmail.com +nuwa nuwa@yannis.codes +nxyt lolnoxy@gmail.com Odin Ugedal ougedal@palantir.com Oilbeater mengxin@alauda.io Oksana Baranova oksana.baranova@intel.com @@ -459,6 +548,7 @@ Oliver Ni oliver.ni@gmail.com Oliver Wang a0924100192@gmail.com Omar Aloraini ooraini.dev@gmail.com Ondrej Blazek ondrej.blazek@firma.seznam.cz +oneumyvakin oneumyvaking@mail.ru Osthues osthues.matthias@gmail.com Pablo Ruiz pablo.ruiz@gmail.com Paco Xu paco.xu@daocloud.io @@ -466,8 +556,11 @@ Parth Patel parth.psu@gmail.com Patrice Chalin chalin@cncf.io Patrice Peterson patrice.peterson@mailbox.org Patrick Mahoney pmahoney@greenkeytech.com +Patrick O’Brien patrick.obrien@thetradedesk.com +Patrick Reich patrick@neodyme.io Pat Riehecky riehecky@fnal.gov Patrik Cyvoct patrik@ptrk.io +Paul Bailey spacepants@users.noreply.github.com Paul Chaignon paul.chaignon@gmail.com Paulo Gomes pjbgf@linux.com Pavel Pavlov 40396270+PavelPavlov46@users.noreply.github.com @@ -479,8 +572,9 @@ Peter Jausovec peter.jausovec@solo.io Peter Slovak slovak.peto@gmail.com Philippe Lafoucrière philippe.lafoucriere@gmail.com Philipp Gniewosz philipp.gniewosz@daimlertruck.com -Philip Schmid philip.schmid@isovalent.com +Philip Schmid phisch@cisco.com Pierre-Yves Aillet pyaillet@gmail.com +Pieter van der Giessen pieter@pionative.com Pranavi Roy pranvyr@gmail.com Prashanth.B beeps@google.com Pratyush Singhal psinghal20@gmail.com @@ -488,25 +582,32 @@ Priya Sharma Priya.Sharma6693@gmail.com Qasim Sarfraz qasim.sarfraz@esailors.de Qifeng Guo qifeng.guo@daocloud.io Qingchuan Hao 
qinhao@microsoft.com -Quentin Monnet quentin@isovalent.com +Quentin Monnet qmo@qmon.net Raam ram29@bskyb.com Rachid Zarouali rachid.zarouali@sevensphere.io +Rafael da Fonseca rafael.fonseca@wildlifestudios.com Raghu Gyambavantha raghug@bld-ml-loan4.olympus.f5net.com Rahul Jadhav nyrahul@gmail.com Rahul Joshi rkjoshi@google.com Rajat Jindal rajatjindal83@gmail.com +Ralph Bankston ralph.bankston@isovalent.com Raphael Campos raphael@accuknox.com Raphaël Pinson raphael@isovalent.com Rastislav Szabo rastislav.szabo@isovalent.com Rauan Mayemir rauan@mayemir.io +rawmind0 rawmind@gmail.com Ray Bejjani ray.bejjani@gmail.com Raymond de Jong raymond.dejong@isovalent.com Reilly Brogan reilly@reillybrogan.com Rei Shimizu Shikugawa@gmail.com Rémy Léone rleone@scaleway.com Renat Tuktarov yandzeek@gmail.com +Renaud Gaubert renaud@openai.com Rene Luria rene@luria.ch +René Veenhuis re.veenhuis@gmail.com Rene Zbinden rene.zbinden@postfinance.ch +Richard Lavoie richard.lavoie@logmein.com +Richard Tweed RichardoC@users.noreply.github.com Ricky Ho horicky78@gmail.com Rio Kierkels riokierkels@gmail.com Robin Gögge r.goegge@isovalent.com @@ -530,48 +631,60 @@ Saim Safdar 59512053+Saim-Safdar@users.noreply.githu Saiyam Pathak saiyam@civo.com Salvatore Mazzarino salvatore@accuknox.com Sami Yessou fnzv@users.noreply.github.com +Samuel Lang gh@lang-sam.de Samuel Torres samuelpirestorres@gmail.com Sander Timmerman stimmerman@schubergphilis.com Sandipan Panda samparksandipan@gmail.com Sarah Corleissen sarah.corleissen@isovalent.com Sarvesh Rangnekar sarveshr@google.com +Sascha Grunert sgrunert@redhat.com +Satish Matti smatti@google.com Scott Albertson ascottalbertson@gmail.com Sean Winn sean@isovalent.com Sebastian Nickel nick@nine.ch Sebastian Rojo arpagon@gmail.com Sebastian Wicki sebastian@isovalent.com +Sebastien Lafond sebastien.lafond@cdiscount.com Sebastien Thomas prune@lecentre.net Sergey Generalov sergey@isovalent.com Sergey Monakhov monakhov@puzl.ee Sergey Shevchenko 
sergeyshevchdevelop@gmail.com Sergio Ballesteros snaker@locolandia.net +sh2 shawnhxh@outlook.com Shane Utt shaneutt@linux.com Shantanu Deshpande shantanud106@gmail.com Shunpoco tkngsnsk313320@gmail.com Sigurd Spieckermann sigurd.spieckermann@gmail.com +Simone Magnani simone.magnani@isovalent.com Simone Sciarrati s.sciarrati@gmail.com Simon Pasquier spasquier@mirantis.com +sknop 118932232+sknop-cgn@users.noreply.github.com Smaine Kahlouch smainklh@gmail.com +soggiest nicholas@isovalent.com spacewander spacewanderlzx@gmail.com Stacy Kim stacy.kim@ucla.edu Stephen Martin lockwood@opperline.com +Steve Gargan sgargan@qualtrics.com Steven Ceuppens steven.ceuppens@icloud.com Steven Dake steven.dake@gmail.com Steven Johnson sjdot@protonmail.com +Steven Kreitzer skre@skre.me Steven Normore snormore@digitalocean.com Steven Shuang stevenshuang521@gmail.com Stevo Slavić sslavic@gmail.com Stijn Smits stijn@stijn98s.nl Strukov Anton anstrukov@luxoft.com Stuart Preston mail@stuartpreston.net +Su Fei sofat1989@126.com Sugang Li sugangli@google.com Sven Haardiek sven.haardiek@uni-muenster.de Swaminathan Vasudevan svasudevan@suse.com Taeung Song treeze.taeung@gmail.com Takayoshi Nishida takayoshi.nishida@gmail.com -Tamilmani tamanoha@microsoft.comwq +Tamilmani tamanoha@microsoft.com Tam Mach tam.mach@cilium.io Tasdik Rahman prodicus@outlook.com +Taylor tskinn12@gmail.com Te-Yu Chang dale.teyuchang@gmail.com Thales Paiva thales@accuknox.com TheAifam5 theaifam5@gmail.com @@ -582,6 +695,7 @@ Thomas Bachman tbachman@yahoo.com Thomas Balthazar thomas@balthazar.info Thomas Gosteli thomas.gosteli@protonmail.com Thomas Graf thomas@cilium.io +Thorben von Hacht tvonhacht@apple.com tigerK yanru.lv@daocloud.io Tim Horner timothy.horner@isovalent.com Timo Beckers timo@isovalent.com @@ -592,7 +706,8 @@ Tobias Klauser tobias@cilium.io Tobias Kohlbau tobias@kohlbau.de Tobias Mose mosetobias@gmail.com Tom Hadlaw tom.hadlaw@isovalent.com -Tomoki Sugiura cheztomo513@gmail.com +Tommo Cowling 
952241+tlcowling@users.noreply.github.com +Tomoki Sugiura tomoki-sugiura@cybozu.co.jp Tomoya Fujita Tomoya.Fujita@sony.com Tom Payne twpayne@gmail.com Tony Lambiris tony@criticalstack.com @@ -604,16 +719,18 @@ Travis Glenn Hansen travisghansen@yahoo.com Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com Trevor Tao trevor.tao@arm.com Umesh Keerthy B S umesh.freelance@gmail.com +usiegl00 50933431+usiegl00@users.noreply.github.com Vadim Ponomarev velizarx@gmail.com +vakr vakr@microsoft.com Valas Valancius valas@google.com Vance Li vanceli@tencent.com Vigneshwaren Sunder vickymailed@gmail.com -viktor-kurchenko 69600804+viktor-kurchenko@users.noreply.github.com +viktor-kurchenko viktor.kurchenko@isovalent.com Viktor Kuzmin kvaster@gmail.com Viktor Oreshkin imselfish@stek29.rocks Ville Ojamo bluikko@users.noreply.github.com Vincent Li vincent.mc.li@gmail.com -Vipul Singh singhvipul@microsoft.com +Vipul Singh vipul21sept@gmail.com Vishal Choudhary sendtovishalchoudhary@gmail.com Vishnu Soman K vishnusomank05@gmail.com Vlad Artamonov 742047+vladdy@users.noreply.github.com @@ -641,12 +758,16 @@ Xiaoqing xiaoqingnb@gmail.com Xiaoyang Zhu zhuxiaoyang1996@gmail.com XiaozhiD-web chuanzhi.dai@daocloud.io Xin Li xin.li@daocloud.io +xinwenqiang xinwenqiang@bytedance.com Xinyuan Zhang zhangxinyuan@google.com +xyz-li hui0787411@163.com yanggang gang.yang@daocloud.io yanhongchang yanhongchang@100tal.com +Yann ILAS yann.ilas@gmail.com Yash Shetty yashshetty@google.com Ye Sijun junnplus@gmail.com Yiannis Yiakoumis yiannis@selfienetworks.com +Yingnan Zhang 342144303@qq.com Yongkun Gui ygui@google.com Yosh de Vos yosh@elzorro.nl youhonglian honglian.you@daocloud.io @@ -657,9 +778,10 @@ Yugo Kobayashi kobdotsh@gmail.com yulng wei.yang@daocloud.io Yurii Dzobak yurii.dzobak@lotusflare.com Yurii Komar Subreptivus@gmail.com -Yusuke Suzuki yusuke-suzuki@cybozu.co.jp +Yusuke Suzuki yusuke.suzuki@isovalent.com Yutaro Hayakawa yutaro.hayakawa@isovalent.com Yves Blusseau yves.blusseau@acoss.fr +yylt 
yang8518296@163.com Zang Li zangli@google.com zhanghe9702 zhanghe9702@163.com Zhang Qiang qiangzhang@qiyi.com diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go index 194e031d02..e7ee6fb081 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go @@ -35,6 +35,10 @@ type ClientOption func(*runtime.ClientOperation) type ClientService interface { GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error) + GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) + + GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) + SetTransport(transport runtime.ClientTransport) } @@ -81,6 +85,86 @@ func (a *Client) GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (* panic(msg) } +/* +GetBgpRoutePolicies lists b g p route policies configured in b g p control plane + +Retrieves route policies from BGP Control Plane. 
+*/ +func (a *Client) GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetBgpRoutePoliciesParams() + } + op := &runtime.ClientOperation{ + ID: "GetBgpRoutePolicies", + Method: "GET", + PathPattern: "/bgp/route-policies", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetBgpRoutePoliciesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetBgpRoutePoliciesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetBgpRoutePolicies: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetBgpRoutes lists b g p routes from b g p control plane r i b + +Retrieves routes from BGP Control Plane RIB filtered by parameters you specify +*/ +func (a *Client) GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetBgpRoutesParams() + } + op := &runtime.ClientOperation{ + ID: "GetBgpRoutes", + Method: "GET", + PathPattern: "/bgp/routes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetBgpRoutesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetBgpRoutesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetBgpRoutes: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + // SetTransport changes the transport on the client func (a *Client) SetTransport(transport runtime.ClientTransport) { a.transport = transport diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go index f06304ce69..a1ed8d93ca 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go @@ -32,6 +32,18 @@ func (o *GetBgpPeersReader) ReadResponse(response runtime.ClientResponse, consum return nil, err } return result, nil + case 500: + result := NewGetBgpPeersInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpPeersDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result default: return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) } @@ -97,3 +109,125 @@ func (o *GetBgpPeersOK) readResponse(response runtime.ClientResponse, consumer r return nil } + +// NewGetBgpPeersInternalServerError creates a GetBgpPeersInternalServerError with default headers values +func NewGetBgpPeersInternalServerError() *GetBgpPeersInternalServerError { + return &GetBgpPeersInternalServerError{} +} + +/* +GetBgpPeersInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpPeersInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp peers internal server error response has a 2xx status code +func (o *GetBgpPeersInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp peers internal server error response has a 3xx status code +func (o *GetBgpPeersInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp peers internal server error response has a 4xx status code +func (o *GetBgpPeersInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp peers internal server error response has a 5xx status code +func (o *GetBgpPeersInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp peers internal server error response a status code equal to that given +func (o *GetBgpPeersInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetBgpPeersInternalServerError) Error() string { + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpPeersInternalServerError) String() string { + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpPeersInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpPeersInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpPeersDisabled creates a GetBgpPeersDisabled with default headers values +func NewGetBgpPeersDisabled() *GetBgpPeersDisabled { + return &GetBgpPeersDisabled{} +} + +/* +GetBgpPeersDisabled describes a response 
with status code 501, with default header values. + +BGP Control Plane disabled +*/ +type GetBgpPeersDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp peers disabled response has a 2xx status code +func (o *GetBgpPeersDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp peers disabled response has a 3xx status code +func (o *GetBgpPeersDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp peers disabled response has a 4xx status code +func (o *GetBgpPeersDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp peers disabled response has a 5xx status code +func (o *GetBgpPeersDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp peers disabled response a status code equal to that given +func (o *GetBgpPeersDisabled) IsCode(code int) bool { + return code == 501 +} + +func (o *GetBgpPeersDisabled) Error() string { + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpPeersDisabled) String() string { + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpPeersDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpPeersDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go new file mode 100644 index 0000000000..db2421c1a4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go @@ -0,0 +1,169 @@ 
+// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetBgpRoutePoliciesParams creates a new GetBgpRoutePoliciesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetBgpRoutePoliciesParams() *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithTimeout creates a new GetBgpRoutePoliciesParams object +// with the ability to set a timeout on a request. +func NewGetBgpRoutePoliciesParamsWithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: timeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithContext creates a new GetBgpRoutePoliciesParams object +// with the ability to set a context for a request. +func NewGetBgpRoutePoliciesParamsWithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + Context: ctx, + } +} + +// NewGetBgpRoutePoliciesParamsWithHTTPClient creates a new GetBgpRoutePoliciesParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewGetBgpRoutePoliciesParamsWithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + HTTPClient: client, + } +} + +/* +GetBgpRoutePoliciesParams contains all the parameters to send to the API endpoint + + for the get bgp route policies operation. + + Typically these are written to a http.Request. +*/ +type GetBgpRoutePoliciesParams struct { + + /* RouterAsn. + + Autonomous System Number (ASN) identifying a BGP virtual router instance. + If not specified, all virtual router instances are selected. + + */ + RouterAsn *int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutePoliciesParams) WithDefaults() *GetBgpRoutePoliciesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetBgpRoutePoliciesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutePoliciesParams { + o.SetRouterAsn(routerAsn) + return o +} + +// SetRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetRouterAsn(routerAsn *int64) { + o.RouterAsn = routerAsn +} + +// WriteToRequest writes these params to a swagger request +func (o *GetBgpRoutePoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RouterAsn != nil { + + // query param router_asn + var qrRouterAsn int64 + + if o.RouterAsn 
!= nil { + qrRouterAsn = *o.RouterAsn + } + qRouterAsn := swag.FormatInt64(qrRouterAsn) + if qRouterAsn != "" { + + if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go new file mode 100644 index 0000000000..fe25bc9d63 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go @@ -0,0 +1,233 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetBgpRoutePoliciesReader is a Reader for the GetBgpRoutePolicies structure. +type GetBgpRoutePoliciesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetBgpRoutePoliciesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetBgpRoutePoliciesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetBgpRoutePoliciesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpRoutePoliciesDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetBgpRoutePoliciesOK creates a GetBgpRoutePoliciesOK with default headers values +func NewGetBgpRoutePoliciesOK() *GetBgpRoutePoliciesOK { + return &GetBgpRoutePoliciesOK{} +} + +/* +GetBgpRoutePoliciesOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetBgpRoutePoliciesOK struct { + Payload []*models.BgpRoutePolicy +} + +// IsSuccess returns true when this get bgp route policies o k response has a 2xx status code +func (o *GetBgpRoutePoliciesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get bgp route policies o k response has a 3xx status code +func (o *GetBgpRoutePoliciesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies o k response has a 4xx status code +func (o *GetBgpRoutePoliciesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies o k response has a 5xx status code +func (o *GetBgpRoutePoliciesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get bgp route policies o k response a status code equal to that given +func (o *GetBgpRoutePoliciesOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetBgpRoutePoliciesOK) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutePoliciesOK) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutePoliciesOK) GetPayload() []*models.BgpRoutePolicy { + return o.Payload +} + +func (o *GetBgpRoutePoliciesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutePoliciesInternalServerError creates a GetBgpRoutePoliciesInternalServerError with default headers values +func NewGetBgpRoutePoliciesInternalServerError() *GetBgpRoutePoliciesInternalServerError { + return &GetBgpRoutePoliciesInternalServerError{} +} + +/* +GetBgpRoutePoliciesInternalServerError describes a response with 
status code 500, with default header values. + +Internal Server Error +*/ +type GetBgpRoutePoliciesInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies internal server error response has a 2xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies internal server error response has a 3xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies internal server error response has a 4xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies internal server error response has a 5xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies internal server error response a status code equal to that given +func (o *GetBgpRoutePoliciesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetBgpRoutePoliciesInternalServerError) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewGetBgpRoutePoliciesDisabled creates a GetBgpRoutePoliciesDisabled with default headers values +func NewGetBgpRoutePoliciesDisabled() *GetBgpRoutePoliciesDisabled { + return &GetBgpRoutePoliciesDisabled{} +} + +/* +GetBgpRoutePoliciesDisabled describes a response with status code 501, with default header values. + +BGP Control Plane disabled +*/ +type GetBgpRoutePoliciesDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies disabled response has a 2xx status code +func (o *GetBgpRoutePoliciesDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies disabled response has a 3xx status code +func (o *GetBgpRoutePoliciesDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies disabled response has a 4xx status code +func (o *GetBgpRoutePoliciesDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies disabled response has a 5xx status code +func (o *GetBgpRoutePoliciesDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies disabled response a status code equal to that given +func (o *GetBgpRoutePoliciesDisabled) IsCode(code int) bool { + return code == 501 +} + +func (o *GetBgpRoutePoliciesDisabled) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutePoliciesDisabled) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutePoliciesDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err 
!= nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go new file mode 100644 index 0000000000..9fa279b03d --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go @@ -0,0 +1,286 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetBgpRoutesParams creates a new GetBgpRoutesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetBgpRoutesParams() *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetBgpRoutesParamsWithTimeout creates a new GetBgpRoutesParams object +// with the ability to set a timeout on a request. +func NewGetBgpRoutesParamsWithTimeout(timeout time.Duration) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + timeout: timeout, + } +} + +// NewGetBgpRoutesParamsWithContext creates a new GetBgpRoutesParams object +// with the ability to set a context for a request. +func NewGetBgpRoutesParamsWithContext(ctx context.Context) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + Context: ctx, + } +} + +// NewGetBgpRoutesParamsWithHTTPClient creates a new GetBgpRoutesParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewGetBgpRoutesParamsWithHTTPClient(client *http.Client) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + HTTPClient: client, + } +} + +/* +GetBgpRoutesParams contains all the parameters to send to the API endpoint + + for the get bgp routes operation. + + Typically these are written to a http.Request. +*/ +type GetBgpRoutesParams struct { + + /* Afi. + + Address Family Indicator (AFI) of a BGP route + */ + Afi string + + /* Neighbor. + + IP address specifying a BGP neighbor. + Has to be specified only when table type is adj-rib-in or adj-rib-out. + + */ + Neighbor *string + + /* RouterAsn. + + Autonomous System Number (ASN) identifying a BGP virtual router instance. + If not specified, all virtual router instances are selected. + + */ + RouterAsn *int64 + + /* Safi. + + Subsequent Address Family Indicator (SAFI) of a BGP route + */ + Safi string + + /* TableType. + + BGP Routing Information Base (RIB) table type + */ + TableType string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get bgp routes params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutesParams) WithDefaults() *GetBgpRoutesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get bgp routes params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetBgpRoutesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get bgp routes params +func (o *GetBgpRoutesParams) WithTimeout(timeout time.Duration) *GetBgpRoutesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get bgp routes params +func (o *GetBgpRoutesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get bgp routes params +func (o *GetBgpRoutesParams) WithContext(ctx context.Context) *GetBgpRoutesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get bgp routes params +func (o *GetBgpRoutesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get bgp routes params +func (o *GetBgpRoutesParams) WithHTTPClient(client *http.Client) *GetBgpRoutesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get bgp routes params +func (o *GetBgpRoutesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAfi adds the afi to the get bgp routes params +func (o *GetBgpRoutesParams) WithAfi(afi string) *GetBgpRoutesParams { + o.SetAfi(afi) + return o +} + +// SetAfi adds the afi to the get bgp routes params +func (o *GetBgpRoutesParams) SetAfi(afi string) { + o.Afi = afi +} + +// WithNeighbor adds the neighbor to the get bgp routes params +func (o *GetBgpRoutesParams) WithNeighbor(neighbor *string) *GetBgpRoutesParams { + o.SetNeighbor(neighbor) + return o +} + +// SetNeighbor adds the neighbor to the get bgp routes params +func (o *GetBgpRoutesParams) SetNeighbor(neighbor *string) { + o.Neighbor = neighbor +} + +// WithRouterAsn adds the routerAsn to the get bgp routes params +func (o *GetBgpRoutesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutesParams { + o.SetRouterAsn(routerAsn) + return o +} + +// SetRouterAsn adds the routerAsn to the 
get bgp routes params +func (o *GetBgpRoutesParams) SetRouterAsn(routerAsn *int64) { + o.RouterAsn = routerAsn +} + +// WithSafi adds the safi to the get bgp routes params +func (o *GetBgpRoutesParams) WithSafi(safi string) *GetBgpRoutesParams { + o.SetSafi(safi) + return o +} + +// SetSafi adds the safi to the get bgp routes params +func (o *GetBgpRoutesParams) SetSafi(safi string) { + o.Safi = safi +} + +// WithTableType adds the tableType to the get bgp routes params +func (o *GetBgpRoutesParams) WithTableType(tableType string) *GetBgpRoutesParams { + o.SetTableType(tableType) + return o +} + +// SetTableType adds the tableType to the get bgp routes params +func (o *GetBgpRoutesParams) SetTableType(tableType string) { + o.TableType = tableType +} + +// WriteToRequest writes these params to a swagger request +func (o *GetBgpRoutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param afi + qrAfi := o.Afi + qAfi := qrAfi + if qAfi != "" { + + if err := r.SetQueryParam("afi", qAfi); err != nil { + return err + } + } + + if o.Neighbor != nil { + + // query param neighbor + var qrNeighbor string + + if o.Neighbor != nil { + qrNeighbor = *o.Neighbor + } + qNeighbor := qrNeighbor + if qNeighbor != "" { + + if err := r.SetQueryParam("neighbor", qNeighbor); err != nil { + return err + } + } + } + + if o.RouterAsn != nil { + + // query param router_asn + var qrRouterAsn int64 + + if o.RouterAsn != nil { + qrRouterAsn = *o.RouterAsn + } + qRouterAsn := swag.FormatInt64(qrRouterAsn) + if qRouterAsn != "" { + + if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil { + return err + } + } + } + + // query param safi + qrSafi := o.Safi + qSafi := qrSafi + if qSafi != "" { + + if err := r.SetQueryParam("safi", qSafi); err != nil { + return err + } + } + + // query param table_type + qrTableType := o.TableType + qTableType := qrTableType + if 
qTableType != "" { + + if err := r.SetQueryParam("table_type", qTableType); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go new file mode 100644 index 0000000000..f7211a4679 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go @@ -0,0 +1,233 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetBgpRoutesReader is a Reader for the GetBgpRoutes structure. +type GetBgpRoutesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetBgpRoutesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetBgpRoutesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetBgpRoutesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpRoutesDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetBgpRoutesOK creates a GetBgpRoutesOK with default headers values +func NewGetBgpRoutesOK() *GetBgpRoutesOK { + return &GetBgpRoutesOK{} +} + +/* +GetBgpRoutesOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetBgpRoutesOK struct { + Payload []*models.BgpRoute +} + +// IsSuccess returns true when this get bgp routes o k response has a 2xx status code +func (o *GetBgpRoutesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get bgp routes o k response has a 3xx status code +func (o *GetBgpRoutesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes o k response has a 4xx status code +func (o *GetBgpRoutesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes o k response has a 5xx status code +func (o *GetBgpRoutesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get bgp routes o k response a status code equal to that given +func (o *GetBgpRoutesOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetBgpRoutesOK) Error() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutesOK) String() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutesOK) GetPayload() []*models.BgpRoute { + return o.Payload +} + +func (o *GetBgpRoutesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutesInternalServerError creates a GetBgpRoutesInternalServerError with default headers values +func NewGetBgpRoutesInternalServerError() *GetBgpRoutesInternalServerError { + return &GetBgpRoutesInternalServerError{} +} + +/* +GetBgpRoutesInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpRoutesInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp routes internal server error response has a 2xx status code +func (o *GetBgpRoutesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp routes internal server error response has a 3xx status code +func (o *GetBgpRoutesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes internal server error response has a 4xx status code +func (o *GetBgpRoutesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes internal server error response has a 5xx status code +func (o *GetBgpRoutesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp routes internal server error response a status code equal to that given +func (o *GetBgpRoutesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetBgpRoutesInternalServerError) Error() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutesInternalServerError) String() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutesInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutesDisabled creates a GetBgpRoutesDisabled with default headers values +func NewGetBgpRoutesDisabled() *GetBgpRoutesDisabled { + return &GetBgpRoutesDisabled{} +} + +/* 
+GetBgpRoutesDisabled describes a response with status code 501, with default header values. + +BGP Control Plane disabled +*/ +type GetBgpRoutesDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp routes disabled response has a 2xx status code +func (o *GetBgpRoutesDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp routes disabled response has a 3xx status code +func (o *GetBgpRoutesDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes disabled response has a 4xx status code +func (o *GetBgpRoutesDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes disabled response has a 5xx status code +func (o *GetBgpRoutesDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp routes disabled response a status code equal to that given +func (o *GetBgpRoutesDisabled) IsCode(code int) bool { + return code == 501 +} + +func (o *GetBgpRoutesDisabled) Error() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutesDisabled) String() string { + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutesDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go index 0f824d32b6..650a1cf381 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go +++ 
b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go @@ -42,6 +42,8 @@ type ClientService interface { GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) + GetHealth(params *GetHealthParams, opts ...ClientOption) (*GetHealthOK, error) + GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error) @@ -174,7 +176,7 @@ func (a *Client) GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetC } /* -GetDebuginfo retrieves information about the agent and evironment for debugging +GetDebuginfo retrieves information about the agent and environment for debugging */ func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) { // TODO: Validate the params before sending @@ -211,6 +213,46 @@ func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) panic(msg) } +/* +GetHealth gets modules health of cilium daemon + +Returns modules health and status information of the Cilium daemon. 
+*/ +func (a *Client) GetHealth(params *GetHealthParams, opts ...ClientOption) (*GetHealthOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetHealthParams() + } + op := &runtime.ClientOperation{ + ID: "GetHealth", + Method: "GET", + PathPattern: "/health", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetHealthReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetHealthOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetHealth: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* GetHealthz gets health of cilium daemon diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go new file mode 100644 index 0000000000..dba3522587 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go @@ -0,0 +1,159 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package daemon + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetHealthParams creates a new GetHealthParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetHealthParams() *GetHealthParams { + return &GetHealthParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetHealthParamsWithTimeout creates a new GetHealthParams object +// with the ability to set a timeout on a request. +func NewGetHealthParamsWithTimeout(timeout time.Duration) *GetHealthParams { + return &GetHealthParams{ + timeout: timeout, + } +} + +// NewGetHealthParamsWithContext creates a new GetHealthParams object +// with the ability to set a context for a request. +func NewGetHealthParamsWithContext(ctx context.Context) *GetHealthParams { + return &GetHealthParams{ + Context: ctx, + } +} + +// NewGetHealthParamsWithHTTPClient creates a new GetHealthParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetHealthParamsWithHTTPClient(client *http.Client) *GetHealthParams { + return &GetHealthParams{ + HTTPClient: client, + } +} + +/* +GetHealthParams contains all the parameters to send to the API endpoint + + for the get health operation. + + Typically these are written to a http.Request. +*/ +type GetHealthParams struct { + + /* Brief. + + Brief is a brief representation of the Cilium status. + + */ + Brief *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get health params (not the query body). 
+// +// All values with no default are reset to their zero value. +func (o *GetHealthParams) WithDefaults() *GetHealthParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get health params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetHealthParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get health params +func (o *GetHealthParams) WithTimeout(timeout time.Duration) *GetHealthParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get health params +func (o *GetHealthParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get health params +func (o *GetHealthParams) WithContext(ctx context.Context) *GetHealthParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get health params +func (o *GetHealthParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get health params +func (o *GetHealthParams) WithHTTPClient(client *http.Client) *GetHealthParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get health params +func (o *GetHealthParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBrief adds the brief to the get health params +func (o *GetHealthParams) WithBrief(brief *bool) *GetHealthParams { + o.SetBrief(brief) + return o +} + +// SetBrief adds the brief to the get health params +func (o *GetHealthParams) SetBrief(brief *bool) { + o.Brief = brief +} + +// WriteToRequest writes these params to a swagger request +func (o *GetHealthParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Brief != nil { + + // header param brief + if err := 
r.SetHeaderParam("brief", swag.FormatBool(*o.Brief)); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go new file mode 100644 index 0000000000..17a878fa16 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package daemon + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetHealthReader is a Reader for the GetHealth structure. +type GetHealthReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetHealthReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetHealthOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetHealthOK creates a GetHealthOK with default headers values +func NewGetHealthOK() *GetHealthOK { + return &GetHealthOK{} +} + +/* +GetHealthOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetHealthOK struct { + Payload *models.ModulesHealth +} + +// IsSuccess returns true when this get health o k response has a 2xx status code +func (o *GetHealthOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get health o k response has a 3xx status code +func (o *GetHealthOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get health o k response has a 4xx status code +func (o *GetHealthOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get health o k response has a 5xx status code +func (o *GetHealthOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get health o k response a status code equal to that given +func (o *GetHealthOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetHealthOK) Error() string { + return fmt.Sprintf("[GET /health][%d] getHealthOK %+v", 200, o.Payload) +} + +func (o *GetHealthOK) String() string { + return fmt.Sprintf("[GET /health][%d] getHealthOK %+v", 200, o.Payload) +} + +func (o *GetHealthOK) GetPayload() *models.ModulesHealth { + return o.Payload +} + +func (o *GetHealthOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ModulesHealth) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go index c2e2ea7930..28d2c62375 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go @@ -74,9 +74,11 @@ type DeleteEndpointIDParams struct { Supported endpoint id prefixes: - 
 cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
 - - container-id: Container runtime ID, e.g. container-id:22222
 - - container-name: Container name, e.g. container-name:foobar
 - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar
 + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
 + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
 + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
 + - cep-name: cep name for this container if K8s is enabled, e.g. cep-name:default:foobar-net1
 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444

 */
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go
new file mode 100644
index 0000000000..8d28629fed
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/cilium/cilium/api/v1/models"
+)
+
+// NewDeleteEndpointParams creates a new DeleteEndpointParams object,
+// with the default timeout for this client.
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteEndpointParams() *DeleteEndpointParams { + return &DeleteEndpointParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteEndpointParamsWithTimeout creates a new DeleteEndpointParams object +// with the ability to set a timeout on a request. +func NewDeleteEndpointParamsWithTimeout(timeout time.Duration) *DeleteEndpointParams { + return &DeleteEndpointParams{ + timeout: timeout, + } +} + +// NewDeleteEndpointParamsWithContext creates a new DeleteEndpointParams object +// with the ability to set a context for a request. +func NewDeleteEndpointParamsWithContext(ctx context.Context) *DeleteEndpointParams { + return &DeleteEndpointParams{ + Context: ctx, + } +} + +// NewDeleteEndpointParamsWithHTTPClient creates a new DeleteEndpointParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteEndpointParamsWithHTTPClient(client *http.Client) *DeleteEndpointParams { + return &DeleteEndpointParams{ + HTTPClient: client, + } +} + +/* +DeleteEndpointParams contains all the parameters to send to the API endpoint + + for the delete endpoint operation. + + Typically these are written to a http.Request. +*/ +type DeleteEndpointParams struct { + + // Endpoint. + Endpoint *models.EndpointBatchDeleteRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete endpoint params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteEndpointParams) WithDefaults() *DeleteEndpointParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete endpoint params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteEndpointParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete endpoint params +func (o *DeleteEndpointParams) WithTimeout(timeout time.Duration) *DeleteEndpointParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete endpoint params +func (o *DeleteEndpointParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete endpoint params +func (o *DeleteEndpointParams) WithContext(ctx context.Context) *DeleteEndpointParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete endpoint params +func (o *DeleteEndpointParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete endpoint params +func (o *DeleteEndpointParams) WithHTTPClient(client *http.Client) *DeleteEndpointParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete endpoint params +func (o *DeleteEndpointParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEndpoint adds the endpoint to the delete endpoint params +func (o *DeleteEndpointParams) WithEndpoint(endpoint *models.EndpointBatchDeleteRequest) *DeleteEndpointParams { + o.SetEndpoint(endpoint) + return o +} + +// SetEndpoint adds the endpoint to the delete endpoint params +func (o *DeleteEndpointParams) SetEndpoint(endpoint *models.EndpointBatchDeleteRequest) { + o.Endpoint = endpoint +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Endpoint != nil { + if err := r.SetBodyParam(o.Endpoint); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go new file mode 100644 index 0000000000..36842a86c8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go @@ -0,0 +1,325 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package endpoint + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// DeleteEndpointReader is a Reader for the DeleteEndpoint structure. +type DeleteEndpointReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *DeleteEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDeleteEndpointOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 206: + result := NewDeleteEndpointErrors() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeleteEndpointInvalid() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeleteEndpointNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 429: + result := NewDeleteEndpointTooManyRequests() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, 
runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteEndpointOK creates a DeleteEndpointOK with default headers values +func NewDeleteEndpointOK() *DeleteEndpointOK { + return &DeleteEndpointOK{} +} + +/* +DeleteEndpointOK describes a response with status code 200, with default header values. + +Success +*/ +type DeleteEndpointOK struct { +} + +// IsSuccess returns true when this delete endpoint o k response has a 2xx status code +func (o *DeleteEndpointOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete endpoint o k response has a 3xx status code +func (o *DeleteEndpointOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint o k response has a 4xx status code +func (o *DeleteEndpointOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete endpoint o k response has a 5xx status code +func (o *DeleteEndpointOK) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint o k response a status code equal to that given +func (o *DeleteEndpointOK) IsCode(code int) bool { + return code == 200 +} + +func (o *DeleteEndpointOK) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK ", 200) +} + +func (o *DeleteEndpointOK) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK ", 200) +} + +func (o *DeleteEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointErrors creates a DeleteEndpointErrors with default headers values +func NewDeleteEndpointErrors() *DeleteEndpointErrors { + return &DeleteEndpointErrors{} +} + +/* +DeleteEndpointErrors describes a response with status code 206, with default header values. 
+ +Deleted with a number of errors encountered +*/ +type DeleteEndpointErrors struct { + Payload int64 +} + +// IsSuccess returns true when this delete endpoint errors response has a 2xx status code +func (o *DeleteEndpointErrors) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete endpoint errors response has a 3xx status code +func (o *DeleteEndpointErrors) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint errors response has a 4xx status code +func (o *DeleteEndpointErrors) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete endpoint errors response has a 5xx status code +func (o *DeleteEndpointErrors) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint errors response a status code equal to that given +func (o *DeleteEndpointErrors) IsCode(code int) bool { + return code == 206 +} + +func (o *DeleteEndpointErrors) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %+v", 206, o.Payload) +} + +func (o *DeleteEndpointErrors) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %+v", 206, o.Payload) +} + +func (o *DeleteEndpointErrors) GetPayload() int64 { + return o.Payload +} + +func (o *DeleteEndpointErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteEndpointInvalid creates a DeleteEndpointInvalid with default headers values +func NewDeleteEndpointInvalid() *DeleteEndpointInvalid { + return &DeleteEndpointInvalid{} +} + +/* +DeleteEndpointInvalid describes a response with status code 400, with default header values. 
+ +Invalid endpoint delete request +*/ +type DeleteEndpointInvalid struct { +} + +// IsSuccess returns true when this delete endpoint invalid response has a 2xx status code +func (o *DeleteEndpointInvalid) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint invalid response has a 3xx status code +func (o *DeleteEndpointInvalid) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint invalid response has a 4xx status code +func (o *DeleteEndpointInvalid) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint invalid response has a 5xx status code +func (o *DeleteEndpointInvalid) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint invalid response a status code equal to that given +func (o *DeleteEndpointInvalid) IsCode(code int) bool { + return code == 400 +} + +func (o *DeleteEndpointInvalid) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid ", 400) +} + +func (o *DeleteEndpointInvalid) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid ", 400) +} + +func (o *DeleteEndpointInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointNotFound creates a DeleteEndpointNotFound with default headers values +func NewDeleteEndpointNotFound() *DeleteEndpointNotFound { + return &DeleteEndpointNotFound{} +} + +/* +DeleteEndpointNotFound describes a response with status code 404, with default header values. 
+ +No endpoints with provided parameters found +*/ +type DeleteEndpointNotFound struct { +} + +// IsSuccess returns true when this delete endpoint not found response has a 2xx status code +func (o *DeleteEndpointNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint not found response has a 3xx status code +func (o *DeleteEndpointNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint not found response has a 4xx status code +func (o *DeleteEndpointNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint not found response has a 5xx status code +func (o *DeleteEndpointNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint not found response a status code equal to that given +func (o *DeleteEndpointNotFound) IsCode(code int) bool { + return code == 404 +} + +func (o *DeleteEndpointNotFound) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound ", 404) +} + +func (o *DeleteEndpointNotFound) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound ", 404) +} + +func (o *DeleteEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointTooManyRequests creates a DeleteEndpointTooManyRequests with default headers values +func NewDeleteEndpointTooManyRequests() *DeleteEndpointTooManyRequests { + return &DeleteEndpointTooManyRequests{} +} + +/* +DeleteEndpointTooManyRequests describes a response with status code 429, with default header values. 
+ +Rate-limiting too many requests in the given time frame +*/ +type DeleteEndpointTooManyRequests struct { +} + +// IsSuccess returns true when this delete endpoint too many requests response has a 2xx status code +func (o *DeleteEndpointTooManyRequests) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint too many requests response has a 3xx status code +func (o *DeleteEndpointTooManyRequests) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint too many requests response has a 4xx status code +func (o *DeleteEndpointTooManyRequests) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint too many requests response has a 5xx status code +func (o *DeleteEndpointTooManyRequests) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint too many requests response a status code equal to that given +func (o *DeleteEndpointTooManyRequests) IsCode(code int) bool { + return code == 429 +} + +func (o *DeleteEndpointTooManyRequests) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests ", 429) +} + +func (o *DeleteEndpointTooManyRequests) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests ", 429) +} + +func (o *DeleteEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go index 34d3500071..72f7b68eab 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go @@ -33,6 +33,8 @@ type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods 
type ClientService interface { + DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) + DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) @@ -58,6 +60,47 @@ type ClientService interface { SetTransport(transport runtime.ClientTransport) } +/* +DeleteEndpoint deletes a list of endpoints + +Deletes a list of endpoints that have endpoints matching the provided properties +*/ +func (a *Client) DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteEndpointParams() + } + op := &runtime.ClientOperation{ + ID: "DeleteEndpoint", + Method: "DELETE", + PathPattern: "/endpoint", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &DeleteEndpointReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *DeleteEndpointOK: + return value, nil, nil + case *DeleteEndpointErrors: + return nil, value, nil + } + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + /* DeleteEndpointID deletes endpoint diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go index 027970d697..971bf69f06 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDConfigParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go index 11b97e7d96..b211692ce6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDHealthzParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go index 9ed2d6d6bf..27dc404abd 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDLabelsParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go index 32bec87cc1..f93b295ca9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDLogParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go index d43637631f..4bad76a970 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go index a1b33a28d6..9ecd8ef8a1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDConfigParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go index db93b22dd1..8091565f55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDLabelsParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go index fc8495b4c1..f718d19823 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go index 9692c00137..a7342cba0b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go @@ -79,9 +79,11 @@ type PutEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. 
cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go index ea3b520e56..e4da10fcae 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go @@ -80,9 +80,11 @@ type GetFqdnCacheIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. 
container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go index 769f669d83..ef09ec7960 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go @@ -17,6 +17,7 @@ import ( "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // NewPutPolicyParams creates a new PutPolicyParams object, @@ -70,6 +71,18 @@ type PutPolicyParams struct { */ Policy string + /* Replace. + + If true, indicates that existing rules with identical labels should be replaced. + */ + Replace *bool + + /* ReplaceWithLabels. + + If present, indicates that existing rules with the given labels should be deleted. 
+ */ + ReplaceWithLabels []string + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -134,6 +147,28 @@ func (o *PutPolicyParams) SetPolicy(policy string) { o.Policy = policy } +// WithReplace adds the replace to the put policy params +func (o *PutPolicyParams) WithReplace(replace *bool) *PutPolicyParams { + o.SetReplace(replace) + return o +} + +// SetReplace adds the replace to the put policy params +func (o *PutPolicyParams) SetReplace(replace *bool) { + o.Replace = replace +} + +// WithReplaceWithLabels adds the replaceWithLabels to the put policy params +func (o *PutPolicyParams) WithReplaceWithLabels(replaceWithLabels []string) *PutPolicyParams { + o.SetReplaceWithLabels(replaceWithLabels) + return o +} + +// SetReplaceWithLabels adds the replaceWithLabels to the put policy params +func (o *PutPolicyParams) SetReplaceWithLabels(replaceWithLabels []string) { + o.ReplaceWithLabels = replaceWithLabels +} + // WriteToRequest writes these params to a swagger request func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -145,8 +180,53 @@ func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Reg return err } + if o.Replace != nil { + + // query param replace + var qrReplace bool + + if o.Replace != nil { + qrReplace = *o.Replace + } + qReplace := swag.FormatBool(qrReplace) + if qReplace != "" { + + if err := r.SetQueryParam("replace", qReplace); err != nil { + return err + } + } + } + + if o.ReplaceWithLabels != nil { + + // binding items for replace-with-labels + joinedReplaceWithLabels := o.bindParamReplaceWithLabels(reg) + + // query array param replace-with-labels + if err := r.SetQueryParam("replace-with-labels", joinedReplaceWithLabels...); err != nil { + return err + } + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} return nil } + +// bindParamPutPolicy binds the parameter replace-with-labels +func (o *PutPolicyParams) bindParamReplaceWithLabels(formats strfmt.Registry) []string { + replaceWithLabelsIR := o.ReplaceWithLabels + + var replaceWithLabelsIC []string + for _, replaceWithLabelsIIR := range replaceWithLabelsIR { // explode []string + + replaceWithLabelsIIV := replaceWithLabelsIIR // string as string + replaceWithLabelsIC = append(replaceWithLabelsIC, replaceWithLabelsIIV) + } + + // items.CollectionFormat: "" + replaceWithLabelsIS := swag.JoinByFormat(replaceWithLabelsIC, "") + + return replaceWithLabelsIS +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go new file mode 100644 index 0000000000..b936411eaa --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go @@ -0,0 +1,236 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package statedb + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetStatedbQueryTableParams creates a new GetStatedbQueryTableParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetStatedbQueryTableParams() *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetStatedbQueryTableParamsWithTimeout creates a new GetStatedbQueryTableParams object +// with the ability to set a timeout on a request. +func NewGetStatedbQueryTableParamsWithTimeout(timeout time.Duration) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + timeout: timeout, + } +} + +// NewGetStatedbQueryTableParamsWithContext creates a new GetStatedbQueryTableParams object +// with the ability to set a context for a request. +func NewGetStatedbQueryTableParamsWithContext(ctx context.Context) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + Context: ctx, + } +} + +// NewGetStatedbQueryTableParamsWithHTTPClient creates a new GetStatedbQueryTableParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetStatedbQueryTableParamsWithHTTPClient(client *http.Client) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + HTTPClient: client, + } +} + +/* +GetStatedbQueryTableParams contains all the parameters to send to the API endpoint + + for the get statedb query table operation. + + Typically these are written to a http.Request. +*/ +type GetStatedbQueryTableParams struct { + + /* Index. + + StateDB index name + */ + Index string + + /* Key. + + Query key (base64 encoded) + */ + Key string + + /* Lowerbound. + + If true perform a LowerBound search + */ + Lowerbound bool + + /* Table. + + StateDB table name + */ + Table string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get statedb query table params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetStatedbQueryTableParams) WithDefaults() *GetStatedbQueryTableParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get statedb query table params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetStatedbQueryTableParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithTimeout(timeout time.Duration) *GetStatedbQueryTableParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithContext(ctx context.Context) *GetStatedbQueryTableParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithHTTPClient(client *http.Client) *GetStatedbQueryTableParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithIndex adds the index to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithIndex(index string) *GetStatedbQueryTableParams { + o.SetIndex(index) + return o +} + +// SetIndex adds the index to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetIndex(index string) { + o.Index = index +} + +// WithKey adds the key to the get statedb query table params +func (o 
*GetStatedbQueryTableParams) WithKey(key string) *GetStatedbQueryTableParams { + o.SetKey(key) + return o +} + +// SetKey adds the key to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetKey(key string) { + o.Key = key +} + +// WithLowerbound adds the lowerbound to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithLowerbound(lowerbound bool) *GetStatedbQueryTableParams { + o.SetLowerbound(lowerbound) + return o +} + +// SetLowerbound adds the lowerbound to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetLowerbound(lowerbound bool) { + o.Lowerbound = lowerbound +} + +// WithTable adds the table to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithTable(table string) *GetStatedbQueryTableParams { + o.SetTable(table) + return o +} + +// SetTable adds the table to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetTable(table string) { + o.Table = table +} + +// WriteToRequest writes these params to a swagger request +func (o *GetStatedbQueryTableParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param index + qrIndex := o.Index + qIndex := qrIndex + if qIndex != "" { + + if err := r.SetQueryParam("index", qIndex); err != nil { + return err + } + } + + // query param key + qrKey := o.Key + qKey := qrKey + if qKey != "" { + + if err := r.SetQueryParam("key", qKey); err != nil { + return err + } + } + + // query param lowerbound + qrLowerbound := o.Lowerbound + qLowerbound := swag.FormatBool(qrLowerbound) + if qLowerbound != "" { + + if err := r.SetQueryParam("lowerbound", qLowerbound); err != nil { + return err + } + } + + // path param table + if err := r.SetPathParam("table", o.Table); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go new file mode 100644 index 0000000000..74bfe66f7f --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go @@ -0,0 +1,227 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package statedb + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetStatedbQueryTableReader is a Reader for the GetStatedbQueryTable structure. +type GetStatedbQueryTableReader struct { + formats strfmt.Registry + writer io.Writer +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetStatedbQueryTableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetStatedbQueryTableOK(o.writer) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetStatedbQueryTableBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetStatedbQueryTableNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetStatedbQueryTableOK creates a GetStatedbQueryTableOK with default headers values +func NewGetStatedbQueryTableOK(writer io.Writer) *GetStatedbQueryTableOK { + return &GetStatedbQueryTableOK{ + + Payload: writer, + } +} + +/* +GetStatedbQueryTableOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetStatedbQueryTableOK struct { + Payload io.Writer +} + +// IsSuccess returns true when this get statedb query table o k response has a 2xx status code +func (o *GetStatedbQueryTableOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get statedb query table o k response has a 3xx status code +func (o *GetStatedbQueryTableOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table o k response has a 4xx status code +func (o *GetStatedbQueryTableOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get statedb query table o k response has a 5xx status code +func (o *GetStatedbQueryTableOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table o k response a status code equal to that given +func (o *GetStatedbQueryTableOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetStatedbQueryTableOK) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableOK %+v", 200, o.Payload) +} + +func (o *GetStatedbQueryTableOK) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableOK %+v", 200, o.Payload) +} + +func (o *GetStatedbQueryTableOK) GetPayload() io.Writer { + return o.Payload +} + +func (o *GetStatedbQueryTableOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetStatedbQueryTableBadRequest creates a GetStatedbQueryTableBadRequest with default headers values +func NewGetStatedbQueryTableBadRequest() *GetStatedbQueryTableBadRequest { + return &GetStatedbQueryTableBadRequest{} +} + +/* +GetStatedbQueryTableBadRequest describes a response with status code 400, with default header values. 
+ +Invalid parameters +*/ +type GetStatedbQueryTableBadRequest struct { + Payload models.Error +} + +// IsSuccess returns true when this get statedb query table bad request response has a 2xx status code +func (o *GetStatedbQueryTableBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get statedb query table bad request response has a 3xx status code +func (o *GetStatedbQueryTableBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table bad request response has a 4xx status code +func (o *GetStatedbQueryTableBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get statedb query table bad request response has a 5xx status code +func (o *GetStatedbQueryTableBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table bad request response a status code equal to that given +func (o *GetStatedbQueryTableBadRequest) IsCode(code int) bool { + return code == 400 +} + +func (o *GetStatedbQueryTableBadRequest) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableBadRequest %+v", 400, o.Payload) +} + +func (o *GetStatedbQueryTableBadRequest) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableBadRequest %+v", 400, o.Payload) +} + +func (o *GetStatedbQueryTableBadRequest) GetPayload() models.Error { + return o.Payload +} + +func (o *GetStatedbQueryTableBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetStatedbQueryTableNotFound creates a GetStatedbQueryTableNotFound with default headers values +func NewGetStatedbQueryTableNotFound() *GetStatedbQueryTableNotFound { + return 
&GetStatedbQueryTableNotFound{} +} + +/* +GetStatedbQueryTableNotFound describes a response with status code 404, with default header values. + +Table or Index not found +*/ +type GetStatedbQueryTableNotFound struct { +} + +// IsSuccess returns true when this get statedb query table not found response has a 2xx status code +func (o *GetStatedbQueryTableNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get statedb query table not found response has a 3xx status code +func (o *GetStatedbQueryTableNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table not found response has a 4xx status code +func (o *GetStatedbQueryTableNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get statedb query table not found response has a 5xx status code +func (o *GetStatedbQueryTableNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table not found response a status code equal to that given +func (o *GetStatedbQueryTableNotFound) IsCode(code int) bool { + return code == 404 +} + +func (o *GetStatedbQueryTableNotFound) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableNotFound ", 404) +} + +func (o *GetStatedbQueryTableNotFound) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableNotFound ", 404) +} + +func (o *GetStatedbQueryTableNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go index 69365c913f..4009fff8ed 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go @@ -36,6 
+36,8 @@ type ClientOption func(*runtime.ClientOperation) type ClientService interface { GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error) + GetStatedbQueryTable(params *GetStatedbQueryTableParams, writer io.Writer, opts ...ClientOption) (*GetStatedbQueryTableOK, error) + SetTransport(transport runtime.ClientTransport) } @@ -77,6 +79,44 @@ func (a *Client) GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, panic(msg) } +/* +GetStatedbQueryTable performs a query against a state d b table +*/ +func (a *Client) GetStatedbQueryTable(params *GetStatedbQueryTableParams, writer io.Writer, opts ...ClientOption) (*GetStatedbQueryTableOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetStatedbQueryTableParams() + } + op := &runtime.ClientOperation{ + ID: "GetStatedbQueryTable", + Method: "GET", + PathPattern: "/statedb/query/{table}", + ProducesMediaTypes: []string{"application/octet-stream"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetStatedbQueryTableReader{formats: a.formats, writer: writer}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetStatedbQueryTableOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetStatedbQueryTable: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + // SetTransport changes the transport on the client func (a *Client) SetTransport(transport runtime.ClientTransport) { a.transport = transport diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/README.md b/vendor/github.com/cilium/cilium/api/v1/flow/README.md new file mode 100644 index 0000000000..f96ebd64f1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/flow/README.md @@ -0,0 +1,1176 @@ +# Protocol Documentation + + +## Table of Contents + +- [flow/flow.proto](#flow_flow-proto) + - [AgentEvent](#flow-AgentEvent) + - [AgentEventUnknown](#flow-AgentEventUnknown) + - [CiliumEventType](#flow-CiliumEventType) + - [DNS](#flow-DNS) + - [DebugEvent](#flow-DebugEvent) + - [Endpoint](#flow-Endpoint) + - [EndpointRegenNotification](#flow-EndpointRegenNotification) + - [EndpointUpdateNotification](#flow-EndpointUpdateNotification) + - [Ethernet](#flow-Ethernet) + - [EventTypeFilter](#flow-EventTypeFilter) + - [Flow](#flow-Flow) + - [FlowFilter](#flow-FlowFilter) + - [HTTP](#flow-HTTP) + - [HTTPHeader](#flow-HTTPHeader) + - [ICMPv4](#flow-ICMPv4) + - [ICMPv6](#flow-ICMPv6) + - [IP](#flow-IP) + - [IPCacheNotification](#flow-IPCacheNotification) + - [Kafka](#flow-Kafka) + - [Layer4](#flow-Layer4) + - [Layer7](#flow-Layer7) + - [LostEvent](#flow-LostEvent) + - [NetworkInterface](#flow-NetworkInterface) + - [Policy](#flow-Policy) + - [PolicyUpdateNotification](#flow-PolicyUpdateNotification) + - [SCTP](#flow-SCTP) + - [Service](#flow-Service) + - [ServiceDeleteNotification](#flow-ServiceDeleteNotification) + - [ServiceUpsertNotification](#flow-ServiceUpsertNotification) + - [ServiceUpsertNotificationAddr](#flow-ServiceUpsertNotificationAddr) + - [TCP](#flow-TCP) + - [TCPFlags](#flow-TCPFlags) + - [TimeNotification](#flow-TimeNotification) + - [TraceContext](#flow-TraceContext) + - [TraceParent](#flow-TraceParent) + - [UDP](#flow-UDP) + - [Workload](#flow-Workload) + + - 
[AgentEventType](#flow-AgentEventType) + - [AuthType](#flow-AuthType) + - [DebugCapturePoint](#flow-DebugCapturePoint) + - [DebugEventType](#flow-DebugEventType) + - [DropReason](#flow-DropReason) + - [EventType](#flow-EventType) + - [FlowType](#flow-FlowType) + - [IPVersion](#flow-IPVersion) + - [L7FlowType](#flow-L7FlowType) + - [LostEventSource](#flow-LostEventSource) + - [SocketTranslationPoint](#flow-SocketTranslationPoint) + - [TraceObservationPoint](#flow-TraceObservationPoint) + - [TrafficDirection](#flow-TrafficDirection) + - [Verdict](#flow-Verdict) + +- [Scalar Value Types](#scalar-value-types) + + + + +

Top

+ +## flow/flow.proto + + + + + +### AgentEvent + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [AgentEventType](#flow-AgentEventType) | | | +| unknown | [AgentEventUnknown](#flow-AgentEventUnknown) | | | +| agent_start | [TimeNotification](#flow-TimeNotification) | | | +| policy_update | [PolicyUpdateNotification](#flow-PolicyUpdateNotification) | | used for POLICY_UPDATED and POLICY_DELETED | +| endpoint_regenerate | [EndpointRegenNotification](#flow-EndpointRegenNotification) | | used for ENDPOINT_REGENERATE_SUCCESS and ENDPOINT_REGENERATE_FAILURE | +| endpoint_update | [EndpointUpdateNotification](#flow-EndpointUpdateNotification) | | used for ENDPOINT_CREATED and ENDPOINT_DELETED | +| ipcache_update | [IPCacheNotification](#flow-IPCacheNotification) | | used for IPCACHE_UPSERTED and IPCACHE_DELETED | +| service_upsert | [ServiceUpsertNotification](#flow-ServiceUpsertNotification) | | | +| service_delete | [ServiceDeleteNotification](#flow-ServiceDeleteNotification) | | | + + + + + + + + +### AgentEventUnknown + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [string](#string) | | | +| notification | [string](#string) | | | + + + + + + + + +### CiliumEventType +CiliumEventType from which the flow originated. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [int32](#int32) | | type of event the flow originated from, i.e. github.com/cilium/cilium/pkg/monitor/api.MessageType* | +| sub_type | [int32](#int32) | | sub_type may indicate more details depending on type, e.g. - github.com/cilium/cilium/pkg/monitor/api.Trace* - github.com/cilium/cilium/pkg/monitor/api.Drop* - github.com/cilium/cilium/pkg/monitor/api.DbgCapture* | + + + + + + + + +### DNS +DNS flow. 
This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264): + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| query | [string](#string) | | DNS name that's being looked up: e.g. "isovalent.com." | +| ips | [string](#string) | repeated | List of IP addresses in the DNS response. | +| ttl | [uint32](#uint32) | | TTL in the DNS response. | +| cnames | [string](#string) | repeated | List of CNames in the DNS response. | +| observation_source | [string](#string) | | Corresponds to DNSDataSource defined in: https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L253 | +| rcode | [uint32](#uint32) | | Return code of the DNS request defined in: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 | +| qtypes | [string](#string) | repeated | String representation of qtypes defined in: https://tools.ietf.org/html/rfc1035#section-3.2.3 | +| rrtypes | [string](#string) | repeated | String representation of rrtypes defined in: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4 | + + + + + + + + +### DebugEvent + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [DebugEventType](#flow-DebugEventType) | | | +| source | [Endpoint](#flow-Endpoint) | | | +| hash | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | | +| arg1 | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | | +| arg2 | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | | +| arg3 | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | | +| message | [string](#string) | | | +| cpu | [google.protobuf.Int32Value](#google-protobuf-Int32Value) | | | + + + + + + + + +### Endpoint + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | 
----------- | +| ID | [uint32](#uint32) | | | +| identity | [uint32](#uint32) | | | +| namespace | [string](#string) | | | +| labels | [string](#string) | repeated | labels in `foo=bar` format. | +| pod_name | [string](#string) | | | +| workloads | [Workload](#flow-Workload) | repeated | | + + + + + + + + +### EndpointRegenNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [uint64](#uint64) | | | +| labels | [string](#string) | repeated | | +| error | [string](#string) | | | + + + + + + + + +### EndpointUpdateNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [uint64](#uint64) | | | +| labels | [string](#string) | repeated | | +| error | [string](#string) | | | +| pod_name | [string](#string) | | | +| namespace | [string](#string) | | | + + + + + + + + +### Ethernet + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source | [string](#string) | | | +| destination | [string](#string) | | | + + + + + + + + +### EventTypeFilter +EventTypeFilter is a filter describing a particular event type. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [int32](#int32) | | type is the primary flow type as defined by: github.com/cilium/cilium/pkg/monitor/api.MessageType* | +| match_sub_type | [bool](#bool) | | match_sub_type is set to true when matching on the sub_type should be done. This flag is required as 0 is a valid sub_type. | +| sub_type | [int32](#int32) | | sub_type is the secondary type, e.g. - github.com/cilium/cilium/pkg/monitor/api.Trace* | + + + + + + + + +### Flow + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| time | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | | +| uuid | [string](#string) | | uuid is a universally unique identifier for this flow. 
| +| verdict | [Verdict](#flow-Verdict) | | | +| drop_reason | [uint32](#uint32) | | **Deprecated.** only applicable to Verdict = DROPPED. deprecated in favor of drop_reason_desc. | +| auth_type | [AuthType](#flow-AuthType) | | auth_type is the authentication type specified for the flow in Cilium Network Policy. Only set on policy verdict events. | +| ethernet | [Ethernet](#flow-Ethernet) | | l2 | +| IP | [IP](#flow-IP) | | l3 | +| l4 | [Layer4](#flow-Layer4) | | l4 | +| source | [Endpoint](#flow-Endpoint) | | | +| destination | [Endpoint](#flow-Endpoint) | | | +| Type | [FlowType](#flow-FlowType) | | | +| node_name | [string](#string) | | NodeName is the name of the node from which this Flow was captured. | +| source_names | [string](#string) | repeated | all names the source IP can have. | +| destination_names | [string](#string) | repeated | all names the destination IP can have. | +| l7 | [Layer7](#flow-Layer7) | | L7 information. This field is set if and only if FlowType is L7. | +| reply | [bool](#bool) | | **Deprecated.** Deprecated. This suffers from false negatives due to protobuf not being able to distinguish between the value being false or it being absent. Please use is_reply instead. | +| event_type | [CiliumEventType](#flow-CiliumEventType) | | EventType of the originating Cilium event | +| source_service | [Service](#flow-Service) | | source_service contains the service name of the source | +| destination_service | [Service](#flow-Service) | | destination_service contains the service name of the destination | +| traffic_direction | [TrafficDirection](#flow-TrafficDirection) | | traffic_direction of the connection, e.g. 
ingress or egress | +| policy_match_type | [uint32](#uint32) | | policy_match_type is only applicable to the cilium event type PolicyVerdict https://github.com/cilium/cilium/blob/e831859b5cc336c6d964a6d35bbd34d1840e21b9/pkg/monitor/datapath_policy.go#L50 | +| trace_observation_point | [TraceObservationPoint](#flow-TraceObservationPoint) | | Only applicable to cilium trace notifications, blank for other types. | +| drop_reason_desc | [DropReason](#flow-DropReason) | | only applicable to Verdict = DROPPED. | +| is_reply | [google.protobuf.BoolValue](#google-protobuf-BoolValue) | | is_reply indicates that this was a packet (L4) or message (L7) in the reply direction. May be absent (in which case it is unknown whether it is a reply or not). | +| debug_capture_point | [DebugCapturePoint](#flow-DebugCapturePoint) | | Only applicable to cilium debug capture events, blank for other types | +| interface | [NetworkInterface](#flow-NetworkInterface) | | interface is the network interface on which this flow was observed | +| proxy_port | [uint32](#uint32) | | proxy_port indicates the port of the proxy to which the flow was forwarded | +| trace_context | [TraceContext](#flow-TraceContext) | | trace_context contains information about a trace related to the flow, if any. | +| sock_xlate_point | [SocketTranslationPoint](#flow-SocketTranslationPoint) | | sock_xlate_point is the socket translation point. Only applicable to TraceSock notifications, blank for other types | +| socket_cookie | [uint64](#uint64) | | socket_cookie is the Linux kernel socket cookie for this flow. Only applicable to TraceSock notifications, zero for other types | +| cgroup_id | [uint64](#uint64) | | cgroup_id of the process which emitted this event. Only applicable to TraceSock notifications, zero for other types | +| Summary | [string](#string) | | **Deprecated.** This is a temporary workaround to support summary field for pb.Flow without duplicating logic from the old parser. 
This field will be removed once we fully migrate to the new parser. | +| extensions | [google.protobuf.Any](#google-protobuf-Any) | | extensions can be used to add arbitrary additional metadata to flows. This can be used to extend functionality for other Hubble compatible APIs, or experiment with new functionality without needing to change the public API. | +| egress_allowed_by | [Policy](#flow-Policy) | repeated | The CiliumNetworkPolicies allowing the egress of the flow. | +| ingress_allowed_by | [Policy](#flow-Policy) | repeated | The CiliumNetworkPolicies allowing the ingress of the flow. | + + + + + + + + +### FlowFilter +FlowFilter represent an individual flow filter. All fields are optional. If +multiple fields are set, then all fields must match for the filter to match. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | repeated | uuid filters by a list of flow uuids. | +| source_ip | [string](#string) | repeated | source_ip filters by a list of source ips. Each of the source ips can be specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g. "1.1.1.0/24"). | +| source_pod | [string](#string) | repeated | source_pod filters by a list of source pod name prefixes, optionally within a given namespace (e.g. "xwing", "kube-system/coredns-"). The pod name can be omitted to only filter by namespace (e.g. "kube-system/") or the namespace can be omitted to filter for pods in any namespace (e.g. "/xwing") | +| source_fqdn | [string](#string) | repeated | source_fqdn filters by a list of source fully qualified domain names | +| source_label | [string](#string) | repeated | source_labels filters on a list of source label selectors. Selectors support the full Kubernetes label selector syntax. | +| source_service | [string](#string) | repeated | source_service filters on a list of source service names. This field supports the same syntax as the source_pod field. 
| +| source_workload | [Workload](#flow-Workload) | repeated | source_workload filters by a list of source workload. | +| destination_ip | [string](#string) | repeated | destination_ip filters by a list of destination ips. Each of the destination ips can be specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g. "1.1.1.0/24"). | +| destination_pod | [string](#string) | repeated | destination_pod filters by a list of destination pod names | +| destination_fqdn | [string](#string) | repeated | destination_fqdn filters by a list of destination fully qualified domain names | +| destination_label | [string](#string) | repeated | destination_label filters on a list of destination label selectors | +| destination_service | [string](#string) | repeated | destination_service filters on a list of destination service names | +| destination_workload | [Workload](#flow-Workload) | repeated | destination_workload filters by a list of destination workload. | +| traffic_direction | [TrafficDirection](#flow-TrafficDirection) | repeated | traffic_direction filters flow by direction of the connection, e.g. ingress or egress. | +| verdict | [Verdict](#flow-Verdict) | repeated | only return Flows that were classified with a particular verdict. | +| event_type | [EventTypeFilter](#flow-EventTypeFilter) | repeated | event_type is the list of event types to filter on | +| http_status_code | [string](#string) | repeated | http_status_code is a list of string prefixes (e.g. "4+", "404", "5+") to filter on the HTTP status code | +| protocol | [string](#string) | repeated | protocol filters flows by L4 or L7 protocol, e.g. (e.g. "tcp", "http") | +| source_port | [string](#string) | repeated | source_port filters flows by L4 source port | +| destination_port | [string](#string) | repeated | destination_port filters flows by L4 destination port | +| reply | [bool](#bool) | repeated | reply filters flows based on the direction of the flow. 
| +| dns_query | [string](#string) | repeated | dns_query filters L7 DNS flows by query patterns (RE2 regex), e.g. 'kube.*local'. | +| source_identity | [uint32](#uint32) | repeated | source_identity filters by the security identity of the source endpoint. | +| destination_identity | [uint32](#uint32) | repeated | destination_identity filters by the security identity of the destination endpoint. | +| http_method | [string](#string) | repeated | GET, POST, PUT, etc. methods. This type of field is well suited for an enum but every single existing place is using a string already. | +| http_path | [string](#string) | repeated | http_path is a list of regular expressions to filter on the HTTP path. | +| http_url | [string](#string) | repeated | http_url is a list of regular expressions to filter on the HTTP URL. | +| http_header | [HTTPHeader](#flow-HTTPHeader) | repeated | http_header is a list of key:value pairs to filter on the HTTP headers. | +| tcp_flags | [TCPFlags](#flow-TCPFlags) | repeated | tcp_flags filters flows based on TCP header flags | +| node_name | [string](#string) | repeated | node_name is a list of patterns to filter on the node name, e.g. "k8s*", "test-cluster/*.domain.com", "cluster-name/" etc. | +| ip_version | [IPVersion](#flow-IPVersion) | repeated | filter based on IP version (ipv4 or ipv6) | +| trace_id | [string](#string) | repeated | trace_id filters flows by trace ID | + + + + + + + + +### HTTP +L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| code | [uint32](#uint32) | | | +| method | [string](#string) | | | +| url | [string](#string) | | | +| protocol | [string](#string) | | | +| headers | [HTTPHeader](#flow-HTTPHeader) | repeated | | + + + + + + + + +### HTTPHeader + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + + + + + + +### ICMPv4 + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [uint32](#uint32) | | | +| code | [uint32](#uint32) | | | + + + + + + + + +### ICMPv6 + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [uint32](#uint32) | | | +| code | [uint32](#uint32) | | | + + + + + + + + +### IP + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source | [string](#string) | | | +| destination | [string](#string) | | | +| ipVersion | [IPVersion](#flow-IPVersion) | | | +| encrypted | [bool](#bool) | | This field indicates whether the TraceReasonEncryptMask is set or not. https://github.com/cilium/cilium/blob/ba0ed147bd5bb342f67b1794c2ad13c6e99d5236/pkg/monitor/datapath_trace.go#L27 | + + + + + + + + +### IPCacheNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| cidr | [string](#string) | | | +| identity | [uint32](#uint32) | | | +| old_identity | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | | +| host_ip | [string](#string) | | | +| old_host_ip | [string](#string) | | | +| encrypt_key | [uint32](#uint32) | | | +| namespace | [string](#string) | | | +| pod_name | [string](#string) | | | + + + + + + + + +### Kafka +L7 information for Kafka flows. 
It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error_code | [int32](#int32) | | | +| api_version | [int32](#int32) | | | +| api_key | [string](#string) | | | +| correlation_id | [int32](#int32) | | | +| topic | [string](#string) | | | + + + + + + + + +### Layer4 + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| TCP | [TCP](#flow-TCP) | | | +| UDP | [UDP](#flow-UDP) | | | +| ICMPv4 | [ICMPv4](#flow-ICMPv4) | | ICMP is technically not L4, but mutually exclusive with the above | +| ICMPv6 | [ICMPv6](#flow-ICMPv6) | | | +| SCTP | [SCTP](#flow-SCTP) | | | + + + + + + + + +### Layer7 +Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141): + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [L7FlowType](#flow-L7FlowType) | | | +| latency_ns | [uint64](#uint64) | | Latency of the response | +| dns | [DNS](#flow-DNS) | | | +| http | [HTTP](#flow-HTTP) | | | +| kafka | [Kafka](#flow-Kafka) | | | + + + + + + + + +### LostEvent +LostEvent is a message which notifies consumers about a loss of events +that happened before the events were captured by Hubble. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source | [LostEventSource](#flow-LostEventSource) | | source is the location where events got lost. | +| num_events_lost | [uint64](#uint64) | | num_events_lost is the number of events that haven been lost at source. | +| cpu | [google.protobuf.Int32Value](#google-protobuf-Int32Value) | | cpu on which the event was lost if the source of lost events is PERF_EVENT_RING_BUFFER. 
| + + + + + + + + +### NetworkInterface + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| index | [uint32](#uint32) | | | +| name | [string](#string) | | | + + + + + + + + +### Policy + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| namespace | [string](#string) | | | +| labels | [string](#string) | repeated | | +| revision | [uint64](#uint64) | | | + + + + + + + + +### PolicyUpdateNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| labels | [string](#string) | repeated | | +| revision | [uint64](#uint64) | | | +| rule_count | [int64](#int64) | | | + + + + + + + + +### SCTP + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source_port | [uint32](#uint32) | | | +| destination_port | [uint32](#uint32) | | | + + + + + + + + +### Service + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| namespace | [string](#string) | | | + + + + + + + + +### ServiceDeleteNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [uint32](#uint32) | | | + + + + + + + + +### ServiceUpsertNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [uint32](#uint32) | | | +| frontend_address | [ServiceUpsertNotificationAddr](#flow-ServiceUpsertNotificationAddr) | | | +| backend_addresses | [ServiceUpsertNotificationAddr](#flow-ServiceUpsertNotificationAddr) | repeated | | +| type | [string](#string) | | | +| traffic_policy | [string](#string) | | **Deprecated.** | +| name | [string](#string) | | | +| namespace | [string](#string) | | | +| ext_traffic_policy | [string](#string) | | | +| int_traffic_policy | [string](#string) | | | + + + + + + + + +### ServiceUpsertNotificationAddr + + + +| Field | Type | Label | Description | 
+| ----- | ---- | ----- | ----------- | +| ip | [string](#string) | | | +| port | [uint32](#uint32) | | | + + + + + + + + +### TCP + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source_port | [uint32](#uint32) | | | +| destination_port | [uint32](#uint32) | | | +| flags | [TCPFlags](#flow-TCPFlags) | | | + + + + + + + + +### TCPFlags + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| FIN | [bool](#bool) | | | +| SYN | [bool](#bool) | | | +| RST | [bool](#bool) | | | +| PSH | [bool](#bool) | | | +| ACK | [bool](#bool) | | | +| URG | [bool](#bool) | | | +| ECE | [bool](#bool) | | | +| CWR | [bool](#bool) | | | +| NS | [bool](#bool) | | | + + + + + + + + +### TimeNotification + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| time | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | | + + + + + + + + +### TraceContext +TraceContext contains trace context propagation data, i.e. information about a +distributed trace. +For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/). + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| parent | [TraceParent](#flow-TraceParent) | | parent identifies the incoming request in a tracing system. | + + + + + + + + +### TraceParent +TraceParent identifies the incoming request in a tracing system. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| trace_id | [string](#string) | | trace_id is a unique value that identifies a trace. It is a byte array represented as a hex string. 
| + + + + + + + + +### UDP + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| source_port | [uint32](#uint32) | | | +| destination_port | [uint32](#uint32) | | | + + + + + + + + +### Workload + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| kind | [string](#string) | | | + + + + + + + + + + +### AgentEventType +AgentEventType is the type of agent event. These values are shared with type +AgentNotification in pkg/monitor/api/types.go. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| AGENT_EVENT_UNKNOWN | 0 | | +| AGENT_STARTED | 2 | | +| POLICY_UPDATED | 3 | | +| POLICY_DELETED | 4 | | +| ENDPOINT_REGENERATE_SUCCESS | 5 | | +| ENDPOINT_REGENERATE_FAILURE | 6 | | +| ENDPOINT_CREATED | 7 | | +| ENDPOINT_DELETED | 8 | | +| IPCACHE_UPSERTED | 9 | | +| IPCACHE_DELETED | 10 | | +| SERVICE_UPSERTED | 11 | | +| SERVICE_DELETED | 12 | | + + + + + +### AuthType +These types correspond to definitions in pkg/policy/l4.go. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| DISABLED | 0 | | +| SPIRE | 1 | | +| TEST_ALWAYS_FAIL | 2 | | + + + + + +### DebugCapturePoint +These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| DBG_CAPTURE_POINT_UNKNOWN | 0 | | +| DBG_CAPTURE_DELIVERY | 4 | | +| DBG_CAPTURE_FROM_LB | 5 | | +| DBG_CAPTURE_AFTER_V46 | 6 | | +| DBG_CAPTURE_AFTER_V64 | 7 | | +| DBG_CAPTURE_PROXY_PRE | 8 | | +| DBG_CAPTURE_PROXY_POST | 9 | | +| DBG_CAPTURE_SNAT_PRE | 10 | | +| DBG_CAPTURE_SNAT_POST | 11 | | + + + + + +### DebugEventType +These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. 
+ +| Name | Number | Description | +| ---- | ------ | ----------- | +| DBG_EVENT_UNKNOWN | 0 | | +| DBG_GENERIC | 1 | | +| DBG_LOCAL_DELIVERY | 2 | | +| DBG_ENCAP | 3 | | +| DBG_LXC_FOUND | 4 | | +| DBG_POLICY_DENIED | 5 | | +| DBG_CT_LOOKUP | 6 | | +| DBG_CT_LOOKUP_REV | 7 | | +| DBG_CT_MATCH | 8 | | +| DBG_CT_CREATED | 9 | | +| DBG_CT_CREATED2 | 10 | | +| DBG_ICMP6_HANDLE | 11 | | +| DBG_ICMP6_REQUEST | 12 | | +| DBG_ICMP6_NS | 13 | | +| DBG_ICMP6_TIME_EXCEEDED | 14 | | +| DBG_CT_VERDICT | 15 | | +| DBG_DECAP | 16 | | +| DBG_PORT_MAP | 17 | | +| DBG_ERROR_RET | 18 | | +| DBG_TO_HOST | 19 | | +| DBG_TO_STACK | 20 | | +| DBG_PKT_HASH | 21 | | +| DBG_LB6_LOOKUP_FRONTEND | 22 | | +| DBG_LB6_LOOKUP_FRONTEND_FAIL | 23 | | +| DBG_LB6_LOOKUP_BACKEND_SLOT | 24 | | +| DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS | 25 | | +| DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL | 26 | | +| DBG_LB6_LOOKUP_BACKEND_FAIL | 27 | | +| DBG_LB6_REVERSE_NAT_LOOKUP | 28 | | +| DBG_LB6_REVERSE_NAT | 29 | | +| DBG_LB4_LOOKUP_FRONTEND | 30 | | +| DBG_LB4_LOOKUP_FRONTEND_FAIL | 31 | | +| DBG_LB4_LOOKUP_BACKEND_SLOT | 32 | | +| DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS | 33 | | +| DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL | 34 | | +| DBG_LB4_LOOKUP_BACKEND_FAIL | 35 | | +| DBG_LB4_REVERSE_NAT_LOOKUP | 36 | | +| DBG_LB4_REVERSE_NAT | 37 | | +| DBG_LB4_LOOPBACK_SNAT | 38 | | +| DBG_LB4_LOOPBACK_SNAT_REV | 39 | | +| DBG_CT_LOOKUP4 | 40 | | +| DBG_RR_BACKEND_SLOT_SEL | 41 | | +| DBG_REV_PROXY_LOOKUP | 42 | | +| DBG_REV_PROXY_FOUND | 43 | | +| DBG_REV_PROXY_UPDATE | 44 | | +| DBG_L4_POLICY | 45 | | +| DBG_NETDEV_IN_CLUSTER | 46 | | +| DBG_NETDEV_ENCAP4 | 47 | | +| DBG_CT_LOOKUP4_1 | 48 | | +| DBG_CT_LOOKUP4_2 | 49 | | +| DBG_CT_CREATED4 | 50 | | +| DBG_CT_LOOKUP6_1 | 51 | | +| DBG_CT_LOOKUP6_2 | 52 | | +| DBG_CT_CREATED6 | 53 | | +| DBG_SKIP_PROXY | 54 | | +| DBG_L4_CREATE | 55 | | +| DBG_IP_ID_MAP_FAILED4 | 56 | | +| DBG_IP_ID_MAP_FAILED6 | 57 | | +| DBG_IP_ID_MAP_SUCCEED4 | 58 | | +| DBG_IP_ID_MAP_SUCCEED6 | 59 | | +| 
DBG_LB_STALE_CT | 60 | | +| DBG_INHERIT_IDENTITY | 61 | | +| DBG_SK_LOOKUP4 | 62 | | +| DBG_SK_LOOKUP6 | 63 | | +| DBG_SK_ASSIGN | 64 | | +| DBG_L7_LB | 65 | | +| DBG_SKIP_POLICY | 66 | | + + + + + +### DropReason +These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h. +Note that non-drop reasons (i.e. values less than api.DropMin) are not used +here. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| DROP_REASON_UNKNOWN | 0 | non-drop reasons | +| INVALID_SOURCE_MAC | 130 | drop reasons | +| INVALID_DESTINATION_MAC | 131 | | +| INVALID_SOURCE_IP | 132 | | +| POLICY_DENIED | 133 | | +| INVALID_PACKET_DROPPED | 134 | | +| CT_TRUNCATED_OR_INVALID_HEADER | 135 | | +| CT_MISSING_TCP_ACK_FLAG | 136 | | +| CT_UNKNOWN_L4_PROTOCOL | 137 | | +| CT_CANNOT_CREATE_ENTRY_FROM_PACKET | 138 | | +| UNSUPPORTED_L3_PROTOCOL | 139 | | +| MISSED_TAIL_CALL | 140 | | +| ERROR_WRITING_TO_PACKET | 141 | | +| UNKNOWN_L4_PROTOCOL | 142 | | +| UNKNOWN_ICMPV4_CODE | 143 | | +| UNKNOWN_ICMPV4_TYPE | 144 | | +| UNKNOWN_ICMPV6_CODE | 145 | | +| UNKNOWN_ICMPV6_TYPE | 146 | | +| ERROR_RETRIEVING_TUNNEL_KEY | 147 | | +| ERROR_RETRIEVING_TUNNEL_OPTIONS | 148 | | +| INVALID_GENEVE_OPTION | 149 | | +| UNKNOWN_L3_TARGET_ADDRESS | 150 | | +| STALE_OR_UNROUTABLE_IP | 151 | | +| NO_MATCHING_LOCAL_CONTAINER_FOUND | 152 | | +| ERROR_WHILE_CORRECTING_L3_CHECKSUM | 153 | | +| ERROR_WHILE_CORRECTING_L4_CHECKSUM | 154 | | +| CT_MAP_INSERTION_FAILED | 155 | | +| INVALID_IPV6_EXTENSION_HEADER | 156 | | +| IP_FRAGMENTATION_NOT_SUPPORTED | 157 | | +| SERVICE_BACKEND_NOT_FOUND | 158 | | +| NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT | 160 | | +| FAILED_TO_INSERT_INTO_PROXYMAP | 161 | | +| REACHED_EDT_RATE_LIMITING_DROP_HORIZON | 162 | | +| UNKNOWN_CONNECTION_TRACKING_STATE | 163 | | +| LOCAL_HOST_IS_UNREACHABLE | 164 | | +| NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION | 165 | | +| UNSUPPORTED_L2_PROTOCOL | 166 | | +| NO_MAPPING_FOR_NAT_MASQUERADE | 167 | | +| 
UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE | 168 | | +| FIB_LOOKUP_FAILED | 169 | | +| ENCAPSULATION_TRAFFIC_IS_PROHIBITED | 170 | | +| INVALID_IDENTITY | 171 | | +| UNKNOWN_SENDER | 172 | | +| NAT_NOT_NEEDED | 173 | | +| IS_A_CLUSTERIP | 174 | | +| FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND | 175 | | +| FORBIDDEN_ICMPV6_MESSAGE | 176 | | +| DENIED_BY_LB_SRC_RANGE_CHECK | 177 | | +| SOCKET_LOOKUP_FAILED | 178 | | +| SOCKET_ASSIGN_FAILED | 179 | | +| PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL | 180 | | +| POLICY_DENY | 181 | | +| VLAN_FILTERED | 182 | | +| INVALID_VNI | 183 | | +| INVALID_TC_BUFFER | 184 | | +| NO_SID | 185 | | +| MISSING_SRV6_STATE | 186 | | +| NAT46 | 187 | | +| NAT64 | 188 | | +| AUTH_REQUIRED | 189 | | +| CT_NO_MAP_FOUND | 190 | | +| SNAT_NO_MAP_FOUND | 191 | | +| INVALID_CLUSTER_ID | 192 | | +| UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP | 193 | | +| NO_EGRESS_GATEWAY | 194 | | +| UNENCRYPTED_TRAFFIC | 195 | | +| TTL_EXCEEDED | 196 | | +| NO_NODE_ID | 197 | | +| DROP_RATE_LIMITED | 198 | | +| IGMP_HANDLED | 199 | | +| IGMP_SUBSCRIBED | 200 | | +| MULTICAST_HANDLED | 201 | | +| DROP_HOST_NOT_READY | 202 | A BPF program wants to tail call into bpf_host, but the host datapath hasn't been loaded yet. | +| DROP_EP_NOT_READY | 203 | A BPF program wants to tail call some endpoint's policy program in the POLICY_CALL_MAP, but the program is not available. | + + + + + +### EventType +EventType are constants are based on the ones from <linux/perf_event.h>. + +| Name | Number | Description | +| ---- | ------ | ----------- | +| UNKNOWN | 0 | | +| EventSample | 9 | EventSample is equivalent to PERF_RECORD_SAMPLE. | +| RecordLost | 2 | RecordLost is equivalent to PERF_RECORD_LOST. 
| + + + + + +### FlowType + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| UNKNOWN_TYPE | 0 | | +| L3_L4 | 1 | not sure about the underscore here, but `L34` also reads strange | +| L7 | 2 | | +| SOCK | 3 | | + + + + + +### IPVersion + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| IP_NOT_USED | 0 | | +| IPv4 | 1 | | +| IPv6 | 2 | | + + + + + +### L7FlowType +This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26): + +| Name | Number | Description | +| ---- | ------ | ----------- | +| UNKNOWN_L7_TYPE | 0 | | +| REQUEST | 1 | | +| RESPONSE | 2 | | +| SAMPLE | 3 | | + + + + + +### LostEventSource + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| UNKNOWN_LOST_EVENT_SOURCE | 0 | | +| PERF_EVENT_RING_BUFFER | 1 | PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF perf event ring buffer, indicating that userspace agent did not keep up with the events produced by the datapath. | +| OBSERVER_EVENTS_QUEUE | 2 | OBSERVER_EVENTS_QUEUE indicates that events were dropped because the Hubble events queue was full, indicating that the Hubble observer did not keep up. | +| HUBBLE_RING_BUFFER | 3 | HUBBLE_RING_BUFFER indicates that the event was dropped because it could not be read from Hubble's ring buffer in time before being overwritten. 
| + + + + + +### SocketTranslationPoint +This mirrors enum xlate_point in bpf/lib/trace_sock.h + +| Name | Number | Description | +| ---- | ------ | ----------- | +| SOCK_XLATE_POINT_UNKNOWN | 0 | | +| SOCK_XLATE_POINT_PRE_DIRECTION_FWD | 1 | Pre service translation | +| SOCK_XLATE_POINT_POST_DIRECTION_FWD | 2 | Post service translation | +| SOCK_XLATE_POINT_PRE_DIRECTION_REV | 3 | Pre reverse service translation | +| SOCK_XLATE_POINT_POST_DIRECTION_REV | 4 | Post reverse service translation | + + + + + +### TraceObservationPoint + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| UNKNOWN_POINT | 0 | Cilium treats 0 as TO_LXC, but it's something we should work to remove. This is intentionally set as unknown, so proto API can guarantee the observation point is always going to be present on trace events. | +| TO_PROXY | 1 | TO_PROXY indicates network packets are transmitted towards the l7 proxy. | +| TO_HOST | 2 | TO_HOST indicates network packets are transmitted towards the host namespace. | +| TO_STACK | 3 | TO_STACK indicates network packets are transmitted towards the Linux kernel network stack on host machine. | +| TO_OVERLAY | 4 | TO_OVERLAY indicates network packets are transmitted towards the tunnel device. | +| TO_ENDPOINT | 101 | TO_ENDPOINT indicates network packets are transmitted towards endpoints (containers). | +| FROM_ENDPOINT | 5 | FROM_ENDPOINT indicates network packets were received from endpoints (containers). | +| FROM_PROXY | 6 | FROM_PROXY indicates network packets were received from the l7 proxy. | +| FROM_HOST | 7 | FROM_HOST indicates network packets were received from the host namespace. | +| FROM_STACK | 8 | FROM_STACK indicates network packets were received from the Linux kernel network stack on host machine. | +| FROM_OVERLAY | 9 | FROM_OVERLAY indicates network packets were received from the tunnel device. | +| FROM_NETWORK | 10 | FROM_NETWORK indicates network packets were received from native devices. 
| +| TO_NETWORK | 11 | TO_NETWORK indicates network packets are transmitted towards native devices. | + + + + + +### TrafficDirection + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| TRAFFIC_DIRECTION_UNKNOWN | 0 | | +| INGRESS | 1 | | +| EGRESS | 2 | | + + + + + +### Verdict + + +| Name | Number | Description | +| ---- | ------ | ----------- | +| VERDICT_UNKNOWN | 0 | UNKNOWN is used if there is no verdict for this flow event | +| FORWARDED | 1 | FORWARDED is used for flow events where the trace point has forwarded this packet or connection to the next processing entity. | +| DROPPED | 2 | DROPPED is used for flow events where the connection or packet has been dropped (e.g. due to a malformed packet, it being rejected by a network policy etc). The exact drop reason may be found in drop_reason_desc. | +| ERROR | 3 | ERROR is used for flow events where an error occurred during processing | +| AUDIT | 4 | AUDIT is used on policy verdict events in policy audit mode, to denominate flows that would have been dropped by policy if audit mode was turned off | +| REDIRECTED | 5 | REDIRECTED is used for flow events which have been redirected to the proxy | +| TRACED | 6 | TRACED is used for flow events which have been observed at a trace point, but no particular verdict has been reached yet | +| TRANSLATED | 7 | TRANSLATED is used for flow events where an address has been translated | + + + + + + + + + + +## Scalar Value Types + +| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby | +| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- | +| double | | double | double | float | float64 | double | float | Float | +| float | | float | float | float | float32 | float | float | Float | +| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. 
| int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) | +| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) | +| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) | +| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum | +| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass | +| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) | +| bytes | May contain any arbitrary sequence of bytes. 
| string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) | + diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go new file mode 100644 index 0000000000..ebed0110ed --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go @@ -0,0 +1,6125 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.24.0 +// source: flow/flow.proto + +package flow + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FlowType int32 + +const ( + FlowType_UNKNOWN_TYPE FlowType = 0 + FlowType_L3_L4 FlowType = 1 // not sure about the underscore here, but `L34` also reads strange + FlowType_L7 FlowType = 2 + FlowType_SOCK FlowType = 3 +) + +// Enum value maps for FlowType. 
+var ( + FlowType_name = map[int32]string{ + 0: "UNKNOWN_TYPE", + 1: "L3_L4", + 2: "L7", + 3: "SOCK", + } + FlowType_value = map[string]int32{ + "UNKNOWN_TYPE": 0, + "L3_L4": 1, + "L7": 2, + "SOCK": 3, + } +) + +func (x FlowType) Enum() *FlowType { + p := new(FlowType) + *p = x + return p +} + +func (x FlowType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FlowType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[0].Descriptor() +} + +func (FlowType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[0] +} + +func (x FlowType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FlowType.Descriptor instead. +func (FlowType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{0} +} + +// These types correspond to definitions in pkg/policy/l4.go. +type AuthType int32 + +const ( + AuthType_DISABLED AuthType = 0 + AuthType_SPIRE AuthType = 1 + AuthType_TEST_ALWAYS_FAIL AuthType = 2 +) + +// Enum value maps for AuthType. +var ( + AuthType_name = map[int32]string{ + 0: "DISABLED", + 1: "SPIRE", + 2: "TEST_ALWAYS_FAIL", + } + AuthType_value = map[string]int32{ + "DISABLED": 0, + "SPIRE": 1, + "TEST_ALWAYS_FAIL": 2, + } +) + +func (x AuthType) Enum() *AuthType { + p := new(AuthType) + *p = x + return p +} + +func (x AuthType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuthType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[1].Descriptor() +} + +func (AuthType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[1] +} + +func (x AuthType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuthType.Descriptor instead. 
+func (AuthType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{1} +} + +type TraceObservationPoint int32 + +const ( + // Cilium treats 0 as TO_LXC, but it's something we should work to remove. + // This is intentionally set as unknown, so proto API can guarantee the + // observation point is always going to be present on trace events. + TraceObservationPoint_UNKNOWN_POINT TraceObservationPoint = 0 + // TO_PROXY indicates network packets are transmitted towards the l7 proxy. + TraceObservationPoint_TO_PROXY TraceObservationPoint = 1 + // TO_HOST indicates network packets are transmitted towards the host + // namespace. + TraceObservationPoint_TO_HOST TraceObservationPoint = 2 + // TO_STACK indicates network packets are transmitted towards the Linux + // kernel network stack on host machine. + TraceObservationPoint_TO_STACK TraceObservationPoint = 3 + // TO_OVERLAY indicates network packets are transmitted towards the tunnel + // device. + TraceObservationPoint_TO_OVERLAY TraceObservationPoint = 4 + // TO_ENDPOINT indicates network packets are transmitted towards endpoints + // (containers). + TraceObservationPoint_TO_ENDPOINT TraceObservationPoint = 101 + // FROM_ENDPOINT indicates network packets were received from endpoints + // (containers). + TraceObservationPoint_FROM_ENDPOINT TraceObservationPoint = 5 + // FROM_PROXY indicates network packets were received from the l7 proxy. + TraceObservationPoint_FROM_PROXY TraceObservationPoint = 6 + // FROM_HOST indicates network packets were received from the host + // namespace. + TraceObservationPoint_FROM_HOST TraceObservationPoint = 7 + // FROM_STACK indicates network packets were received from the Linux kernel + // network stack on host machine. + TraceObservationPoint_FROM_STACK TraceObservationPoint = 8 + // FROM_OVERLAY indicates network packets were received from the tunnel + // device. 
+ TraceObservationPoint_FROM_OVERLAY TraceObservationPoint = 9 + // FROM_NETWORK indicates network packets were received from native + // devices. + TraceObservationPoint_FROM_NETWORK TraceObservationPoint = 10 + // TO_NETWORK indicates network packets are transmitted towards native + // devices. + TraceObservationPoint_TO_NETWORK TraceObservationPoint = 11 +) + +// Enum value maps for TraceObservationPoint. +var ( + TraceObservationPoint_name = map[int32]string{ + 0: "UNKNOWN_POINT", + 1: "TO_PROXY", + 2: "TO_HOST", + 3: "TO_STACK", + 4: "TO_OVERLAY", + 101: "TO_ENDPOINT", + 5: "FROM_ENDPOINT", + 6: "FROM_PROXY", + 7: "FROM_HOST", + 8: "FROM_STACK", + 9: "FROM_OVERLAY", + 10: "FROM_NETWORK", + 11: "TO_NETWORK", + } + TraceObservationPoint_value = map[string]int32{ + "UNKNOWN_POINT": 0, + "TO_PROXY": 1, + "TO_HOST": 2, + "TO_STACK": 3, + "TO_OVERLAY": 4, + "TO_ENDPOINT": 101, + "FROM_ENDPOINT": 5, + "FROM_PROXY": 6, + "FROM_HOST": 7, + "FROM_STACK": 8, + "FROM_OVERLAY": 9, + "FROM_NETWORK": 10, + "TO_NETWORK": 11, + } +) + +func (x TraceObservationPoint) Enum() *TraceObservationPoint { + p := new(TraceObservationPoint) + *p = x + return p +} + +func (x TraceObservationPoint) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TraceObservationPoint) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[2].Descriptor() +} + +func (TraceObservationPoint) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[2] +} + +func (x TraceObservationPoint) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TraceObservationPoint.Descriptor instead. 
+func (TraceObservationPoint) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{2} +} + +// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26): +type L7FlowType int32 + +const ( + L7FlowType_UNKNOWN_L7_TYPE L7FlowType = 0 + L7FlowType_REQUEST L7FlowType = 1 + L7FlowType_RESPONSE L7FlowType = 2 + L7FlowType_SAMPLE L7FlowType = 3 +) + +// Enum value maps for L7FlowType. +var ( + L7FlowType_name = map[int32]string{ + 0: "UNKNOWN_L7_TYPE", + 1: "REQUEST", + 2: "RESPONSE", + 3: "SAMPLE", + } + L7FlowType_value = map[string]int32{ + "UNKNOWN_L7_TYPE": 0, + "REQUEST": 1, + "RESPONSE": 2, + "SAMPLE": 3, + } +) + +func (x L7FlowType) Enum() *L7FlowType { + p := new(L7FlowType) + *p = x + return p +} + +func (x L7FlowType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (L7FlowType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[3].Descriptor() +} + +func (L7FlowType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[3] +} + +func (x L7FlowType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use L7FlowType.Descriptor instead. +func (L7FlowType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{3} +} + +type IPVersion int32 + +const ( + IPVersion_IP_NOT_USED IPVersion = 0 + IPVersion_IPv4 IPVersion = 1 + IPVersion_IPv6 IPVersion = 2 +) + +// Enum value maps for IPVersion. 
+var ( + IPVersion_name = map[int32]string{ + 0: "IP_NOT_USED", + 1: "IPv4", + 2: "IPv6", + } + IPVersion_value = map[string]int32{ + "IP_NOT_USED": 0, + "IPv4": 1, + "IPv6": 2, + } +) + +func (x IPVersion) Enum() *IPVersion { + p := new(IPVersion) + *p = x + return p +} + +func (x IPVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IPVersion) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[4].Descriptor() +} + +func (IPVersion) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[4] +} + +func (x IPVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IPVersion.Descriptor instead. +func (IPVersion) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{4} +} + +type Verdict int32 + +const ( + // UNKNOWN is used if there is no verdict for this flow event + Verdict_VERDICT_UNKNOWN Verdict = 0 + // FORWARDED is used for flow events where the trace point has forwarded + // this packet or connection to the next processing entity. + Verdict_FORWARDED Verdict = 1 + // DROPPED is used for flow events where the connection or packet has + // been dropped (e.g. due to a malformed packet, it being rejected by a + // network policy etc). The exact drop reason may be found in drop_reason_desc. 
+ Verdict_DROPPED Verdict = 2 + // ERROR is used for flow events where an error occurred during processing + Verdict_ERROR Verdict = 3 + // AUDIT is used on policy verdict events in policy audit mode, to + // denominate flows that would have been dropped by policy if audit mode + // was turned off + Verdict_AUDIT Verdict = 4 + // REDIRECTED is used for flow events which have been redirected to the proxy + Verdict_REDIRECTED Verdict = 5 + // TRACED is used for flow events which have been observed at a trace point, + // but no particular verdict has been reached yet + Verdict_TRACED Verdict = 6 + // TRANSLATED is used for flow events where an address has been translated + Verdict_TRANSLATED Verdict = 7 +) + +// Enum value maps for Verdict. +var ( + Verdict_name = map[int32]string{ + 0: "VERDICT_UNKNOWN", + 1: "FORWARDED", + 2: "DROPPED", + 3: "ERROR", + 4: "AUDIT", + 5: "REDIRECTED", + 6: "TRACED", + 7: "TRANSLATED", + } + Verdict_value = map[string]int32{ + "VERDICT_UNKNOWN": 0, + "FORWARDED": 1, + "DROPPED": 2, + "ERROR": 3, + "AUDIT": 4, + "REDIRECTED": 5, + "TRACED": 6, + "TRANSLATED": 7, + } +) + +func (x Verdict) Enum() *Verdict { + p := new(Verdict) + *p = x + return p +} + +func (x Verdict) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Verdict) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[5].Descriptor() +} + +func (Verdict) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[5] +} + +func (x Verdict) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Verdict.Descriptor instead. +func (Verdict) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{5} +} + +// These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h. +// Note that non-drop reasons (i.e. values less than api.DropMin) are not used +// here. 
+type DropReason int32 + +const ( + // non-drop reasons + DropReason_DROP_REASON_UNKNOWN DropReason = 0 + // drop reasons + DropReason_INVALID_SOURCE_MAC DropReason = 130 + DropReason_INVALID_DESTINATION_MAC DropReason = 131 + DropReason_INVALID_SOURCE_IP DropReason = 132 + DropReason_POLICY_DENIED DropReason = 133 + DropReason_INVALID_PACKET_DROPPED DropReason = 134 + DropReason_CT_TRUNCATED_OR_INVALID_HEADER DropReason = 135 + DropReason_CT_MISSING_TCP_ACK_FLAG DropReason = 136 + DropReason_CT_UNKNOWN_L4_PROTOCOL DropReason = 137 + DropReason_CT_CANNOT_CREATE_ENTRY_FROM_PACKET DropReason = 138 + DropReason_UNSUPPORTED_L3_PROTOCOL DropReason = 139 + DropReason_MISSED_TAIL_CALL DropReason = 140 + DropReason_ERROR_WRITING_TO_PACKET DropReason = 141 + DropReason_UNKNOWN_L4_PROTOCOL DropReason = 142 + DropReason_UNKNOWN_ICMPV4_CODE DropReason = 143 + DropReason_UNKNOWN_ICMPV4_TYPE DropReason = 144 + DropReason_UNKNOWN_ICMPV6_CODE DropReason = 145 + DropReason_UNKNOWN_ICMPV6_TYPE DropReason = 146 + DropReason_ERROR_RETRIEVING_TUNNEL_KEY DropReason = 147 + DropReason_ERROR_RETRIEVING_TUNNEL_OPTIONS DropReason = 148 + DropReason_INVALID_GENEVE_OPTION DropReason = 149 + DropReason_UNKNOWN_L3_TARGET_ADDRESS DropReason = 150 + DropReason_STALE_OR_UNROUTABLE_IP DropReason = 151 + DropReason_NO_MATCHING_LOCAL_CONTAINER_FOUND DropReason = 152 + DropReason_ERROR_WHILE_CORRECTING_L3_CHECKSUM DropReason = 153 + DropReason_ERROR_WHILE_CORRECTING_L4_CHECKSUM DropReason = 154 + DropReason_CT_MAP_INSERTION_FAILED DropReason = 155 + DropReason_INVALID_IPV6_EXTENSION_HEADER DropReason = 156 + DropReason_IP_FRAGMENTATION_NOT_SUPPORTED DropReason = 157 + DropReason_SERVICE_BACKEND_NOT_FOUND DropReason = 158 + DropReason_NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT DropReason = 160 + DropReason_FAILED_TO_INSERT_INTO_PROXYMAP DropReason = 161 + DropReason_REACHED_EDT_RATE_LIMITING_DROP_HORIZON DropReason = 162 + DropReason_UNKNOWN_CONNECTION_TRACKING_STATE DropReason = 163 + 
DropReason_LOCAL_HOST_IS_UNREACHABLE DropReason = 164 + DropReason_NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION DropReason = 165 + DropReason_UNSUPPORTED_L2_PROTOCOL DropReason = 166 + DropReason_NO_MAPPING_FOR_NAT_MASQUERADE DropReason = 167 + DropReason_UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE DropReason = 168 + DropReason_FIB_LOOKUP_FAILED DropReason = 169 + DropReason_ENCAPSULATION_TRAFFIC_IS_PROHIBITED DropReason = 170 + DropReason_INVALID_IDENTITY DropReason = 171 + DropReason_UNKNOWN_SENDER DropReason = 172 + DropReason_NAT_NOT_NEEDED DropReason = 173 + DropReason_IS_A_CLUSTERIP DropReason = 174 + DropReason_FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND DropReason = 175 + DropReason_FORBIDDEN_ICMPV6_MESSAGE DropReason = 176 + DropReason_DENIED_BY_LB_SRC_RANGE_CHECK DropReason = 177 + DropReason_SOCKET_LOOKUP_FAILED DropReason = 178 + DropReason_SOCKET_ASSIGN_FAILED DropReason = 179 + DropReason_PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL DropReason = 180 + DropReason_POLICY_DENY DropReason = 181 + DropReason_VLAN_FILTERED DropReason = 182 + DropReason_INVALID_VNI DropReason = 183 + DropReason_INVALID_TC_BUFFER DropReason = 184 + DropReason_NO_SID DropReason = 185 + DropReason_MISSING_SRV6_STATE DropReason = 186 + DropReason_NAT46 DropReason = 187 + DropReason_NAT64 DropReason = 188 + DropReason_AUTH_REQUIRED DropReason = 189 + DropReason_CT_NO_MAP_FOUND DropReason = 190 + DropReason_SNAT_NO_MAP_FOUND DropReason = 191 + DropReason_INVALID_CLUSTER_ID DropReason = 192 + DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP DropReason = 193 + DropReason_NO_EGRESS_GATEWAY DropReason = 194 + DropReason_UNENCRYPTED_TRAFFIC DropReason = 195 + DropReason_TTL_EXCEEDED DropReason = 196 + DropReason_NO_NODE_ID DropReason = 197 + DropReason_DROP_RATE_LIMITED DropReason = 198 + DropReason_IGMP_HANDLED DropReason = 199 + DropReason_IGMP_SUBSCRIBED DropReason = 200 + DropReason_MULTICAST_HANDLED DropReason = 201 + // A BPF program wants to tail call into bpf_host, but the host 
datapath + // hasn't been loaded yet. + DropReason_DROP_HOST_NOT_READY DropReason = 202 + // A BPF program wants to tail call some endpoint's policy program in the + // POLICY_CALL_MAP, but the program is not available. + DropReason_DROP_EP_NOT_READY DropReason = 203 +) + +// Enum value maps for DropReason. +var ( + DropReason_name = map[int32]string{ + 0: "DROP_REASON_UNKNOWN", + 130: "INVALID_SOURCE_MAC", + 131: "INVALID_DESTINATION_MAC", + 132: "INVALID_SOURCE_IP", + 133: "POLICY_DENIED", + 134: "INVALID_PACKET_DROPPED", + 135: "CT_TRUNCATED_OR_INVALID_HEADER", + 136: "CT_MISSING_TCP_ACK_FLAG", + 137: "CT_UNKNOWN_L4_PROTOCOL", + 138: "CT_CANNOT_CREATE_ENTRY_FROM_PACKET", + 139: "UNSUPPORTED_L3_PROTOCOL", + 140: "MISSED_TAIL_CALL", + 141: "ERROR_WRITING_TO_PACKET", + 142: "UNKNOWN_L4_PROTOCOL", + 143: "UNKNOWN_ICMPV4_CODE", + 144: "UNKNOWN_ICMPV4_TYPE", + 145: "UNKNOWN_ICMPV6_CODE", + 146: "UNKNOWN_ICMPV6_TYPE", + 147: "ERROR_RETRIEVING_TUNNEL_KEY", + 148: "ERROR_RETRIEVING_TUNNEL_OPTIONS", + 149: "INVALID_GENEVE_OPTION", + 150: "UNKNOWN_L3_TARGET_ADDRESS", + 151: "STALE_OR_UNROUTABLE_IP", + 152: "NO_MATCHING_LOCAL_CONTAINER_FOUND", + 153: "ERROR_WHILE_CORRECTING_L3_CHECKSUM", + 154: "ERROR_WHILE_CORRECTING_L4_CHECKSUM", + 155: "CT_MAP_INSERTION_FAILED", + 156: "INVALID_IPV6_EXTENSION_HEADER", + 157: "IP_FRAGMENTATION_NOT_SUPPORTED", + 158: "SERVICE_BACKEND_NOT_FOUND", + 160: "NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT", + 161: "FAILED_TO_INSERT_INTO_PROXYMAP", + 162: "REACHED_EDT_RATE_LIMITING_DROP_HORIZON", + 163: "UNKNOWN_CONNECTION_TRACKING_STATE", + 164: "LOCAL_HOST_IS_UNREACHABLE", + 165: "NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION", + 166: "UNSUPPORTED_L2_PROTOCOL", + 167: "NO_MAPPING_FOR_NAT_MASQUERADE", + 168: "UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE", + 169: "FIB_LOOKUP_FAILED", + 170: "ENCAPSULATION_TRAFFIC_IS_PROHIBITED", + 171: "INVALID_IDENTITY", + 172: "UNKNOWN_SENDER", + 173: "NAT_NOT_NEEDED", + 174: "IS_A_CLUSTERIP", + 175: 
"FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND", + 176: "FORBIDDEN_ICMPV6_MESSAGE", + 177: "DENIED_BY_LB_SRC_RANGE_CHECK", + 178: "SOCKET_LOOKUP_FAILED", + 179: "SOCKET_ASSIGN_FAILED", + 180: "PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL", + 181: "POLICY_DENY", + 182: "VLAN_FILTERED", + 183: "INVALID_VNI", + 184: "INVALID_TC_BUFFER", + 185: "NO_SID", + 186: "MISSING_SRV6_STATE", + 187: "NAT46", + 188: "NAT64", + 189: "AUTH_REQUIRED", + 190: "CT_NO_MAP_FOUND", + 191: "SNAT_NO_MAP_FOUND", + 192: "INVALID_CLUSTER_ID", + 193: "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP", + 194: "NO_EGRESS_GATEWAY", + 195: "UNENCRYPTED_TRAFFIC", + 196: "TTL_EXCEEDED", + 197: "NO_NODE_ID", + 198: "DROP_RATE_LIMITED", + 199: "IGMP_HANDLED", + 200: "IGMP_SUBSCRIBED", + 201: "MULTICAST_HANDLED", + 202: "DROP_HOST_NOT_READY", + 203: "DROP_EP_NOT_READY", + } + DropReason_value = map[string]int32{ + "DROP_REASON_UNKNOWN": 0, + "INVALID_SOURCE_MAC": 130, + "INVALID_DESTINATION_MAC": 131, + "INVALID_SOURCE_IP": 132, + "POLICY_DENIED": 133, + "INVALID_PACKET_DROPPED": 134, + "CT_TRUNCATED_OR_INVALID_HEADER": 135, + "CT_MISSING_TCP_ACK_FLAG": 136, + "CT_UNKNOWN_L4_PROTOCOL": 137, + "CT_CANNOT_CREATE_ENTRY_FROM_PACKET": 138, + "UNSUPPORTED_L3_PROTOCOL": 139, + "MISSED_TAIL_CALL": 140, + "ERROR_WRITING_TO_PACKET": 141, + "UNKNOWN_L4_PROTOCOL": 142, + "UNKNOWN_ICMPV4_CODE": 143, + "UNKNOWN_ICMPV4_TYPE": 144, + "UNKNOWN_ICMPV6_CODE": 145, + "UNKNOWN_ICMPV6_TYPE": 146, + "ERROR_RETRIEVING_TUNNEL_KEY": 147, + "ERROR_RETRIEVING_TUNNEL_OPTIONS": 148, + "INVALID_GENEVE_OPTION": 149, + "UNKNOWN_L3_TARGET_ADDRESS": 150, + "STALE_OR_UNROUTABLE_IP": 151, + "NO_MATCHING_LOCAL_CONTAINER_FOUND": 152, + "ERROR_WHILE_CORRECTING_L3_CHECKSUM": 153, + "ERROR_WHILE_CORRECTING_L4_CHECKSUM": 154, + "CT_MAP_INSERTION_FAILED": 155, + "INVALID_IPV6_EXTENSION_HEADER": 156, + "IP_FRAGMENTATION_NOT_SUPPORTED": 157, + "SERVICE_BACKEND_NOT_FOUND": 158, + "NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT": 160, + "FAILED_TO_INSERT_INTO_PROXYMAP": 161, + 
"REACHED_EDT_RATE_LIMITING_DROP_HORIZON": 162, + "UNKNOWN_CONNECTION_TRACKING_STATE": 163, + "LOCAL_HOST_IS_UNREACHABLE": 164, + "NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION": 165, + "UNSUPPORTED_L2_PROTOCOL": 166, + "NO_MAPPING_FOR_NAT_MASQUERADE": 167, + "UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE": 168, + "FIB_LOOKUP_FAILED": 169, + "ENCAPSULATION_TRAFFIC_IS_PROHIBITED": 170, + "INVALID_IDENTITY": 171, + "UNKNOWN_SENDER": 172, + "NAT_NOT_NEEDED": 173, + "IS_A_CLUSTERIP": 174, + "FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND": 175, + "FORBIDDEN_ICMPV6_MESSAGE": 176, + "DENIED_BY_LB_SRC_RANGE_CHECK": 177, + "SOCKET_LOOKUP_FAILED": 178, + "SOCKET_ASSIGN_FAILED": 179, + "PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL": 180, + "POLICY_DENY": 181, + "VLAN_FILTERED": 182, + "INVALID_VNI": 183, + "INVALID_TC_BUFFER": 184, + "NO_SID": 185, + "MISSING_SRV6_STATE": 186, + "NAT46": 187, + "NAT64": 188, + "AUTH_REQUIRED": 189, + "CT_NO_MAP_FOUND": 190, + "SNAT_NO_MAP_FOUND": 191, + "INVALID_CLUSTER_ID": 192, + "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP": 193, + "NO_EGRESS_GATEWAY": 194, + "UNENCRYPTED_TRAFFIC": 195, + "TTL_EXCEEDED": 196, + "NO_NODE_ID": 197, + "DROP_RATE_LIMITED": 198, + "IGMP_HANDLED": 199, + "IGMP_SUBSCRIBED": 200, + "MULTICAST_HANDLED": 201, + "DROP_HOST_NOT_READY": 202, + "DROP_EP_NOT_READY": 203, + } +) + +func (x DropReason) Enum() *DropReason { + p := new(DropReason) + *p = x + return p +} + +func (x DropReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DropReason) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[6].Descriptor() +} + +func (DropReason) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[6] +} + +func (x DropReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DropReason.Descriptor instead. 
+func (DropReason) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{6} +} + +type TrafficDirection int32 + +const ( + TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN TrafficDirection = 0 + TrafficDirection_INGRESS TrafficDirection = 1 + TrafficDirection_EGRESS TrafficDirection = 2 +) + +// Enum value maps for TrafficDirection. +var ( + TrafficDirection_name = map[int32]string{ + 0: "TRAFFIC_DIRECTION_UNKNOWN", + 1: "INGRESS", + 2: "EGRESS", + } + TrafficDirection_value = map[string]int32{ + "TRAFFIC_DIRECTION_UNKNOWN": 0, + "INGRESS": 1, + "EGRESS": 2, + } +) + +func (x TrafficDirection) Enum() *TrafficDirection { + p := new(TrafficDirection) + *p = x + return p +} + +func (x TrafficDirection) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[7].Descriptor() +} + +func (TrafficDirection) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[7] +} + +func (x TrafficDirection) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TrafficDirection.Descriptor instead. +func (TrafficDirection) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{7} +} + +// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. 
+type DebugCapturePoint int32 + +const ( + DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN DebugCapturePoint = 0 + DebugCapturePoint_DBG_CAPTURE_DELIVERY DebugCapturePoint = 4 + DebugCapturePoint_DBG_CAPTURE_FROM_LB DebugCapturePoint = 5 + DebugCapturePoint_DBG_CAPTURE_AFTER_V46 DebugCapturePoint = 6 + DebugCapturePoint_DBG_CAPTURE_AFTER_V64 DebugCapturePoint = 7 + DebugCapturePoint_DBG_CAPTURE_PROXY_PRE DebugCapturePoint = 8 + DebugCapturePoint_DBG_CAPTURE_PROXY_POST DebugCapturePoint = 9 + DebugCapturePoint_DBG_CAPTURE_SNAT_PRE DebugCapturePoint = 10 + DebugCapturePoint_DBG_CAPTURE_SNAT_POST DebugCapturePoint = 11 +) + +// Enum value maps for DebugCapturePoint. +var ( + DebugCapturePoint_name = map[int32]string{ + 0: "DBG_CAPTURE_POINT_UNKNOWN", + 4: "DBG_CAPTURE_DELIVERY", + 5: "DBG_CAPTURE_FROM_LB", + 6: "DBG_CAPTURE_AFTER_V46", + 7: "DBG_CAPTURE_AFTER_V64", + 8: "DBG_CAPTURE_PROXY_PRE", + 9: "DBG_CAPTURE_PROXY_POST", + 10: "DBG_CAPTURE_SNAT_PRE", + 11: "DBG_CAPTURE_SNAT_POST", + } + DebugCapturePoint_value = map[string]int32{ + "DBG_CAPTURE_POINT_UNKNOWN": 0, + "DBG_CAPTURE_DELIVERY": 4, + "DBG_CAPTURE_FROM_LB": 5, + "DBG_CAPTURE_AFTER_V46": 6, + "DBG_CAPTURE_AFTER_V64": 7, + "DBG_CAPTURE_PROXY_PRE": 8, + "DBG_CAPTURE_PROXY_POST": 9, + "DBG_CAPTURE_SNAT_PRE": 10, + "DBG_CAPTURE_SNAT_POST": 11, + } +) + +func (x DebugCapturePoint) Enum() *DebugCapturePoint { + p := new(DebugCapturePoint) + *p = x + return p +} + +func (x DebugCapturePoint) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DebugCapturePoint) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[8].Descriptor() +} + +func (DebugCapturePoint) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[8] +} + +func (x DebugCapturePoint) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DebugCapturePoint.Descriptor instead. 
+func (DebugCapturePoint) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{8} +} + +// EventType are constants are based on the ones from . +type EventType int32 + +const ( + EventType_UNKNOWN EventType = 0 + // EventSample is equivalent to PERF_RECORD_SAMPLE. + EventType_EventSample EventType = 9 + // RecordLost is equivalent to PERF_RECORD_LOST. + EventType_RecordLost EventType = 2 +) + +// Enum value maps for EventType. +var ( + EventType_name = map[int32]string{ + 0: "UNKNOWN", + 9: "EventSample", + 2: "RecordLost", + } + EventType_value = map[string]int32{ + "UNKNOWN": 0, + "EventSample": 9, + "RecordLost": 2, + } +) + +func (x EventType) Enum() *EventType { + p := new(EventType) + *p = x + return p +} + +func (x EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EventType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[9].Descriptor() +} + +func (EventType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[9] +} + +func (x EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EventType.Descriptor instead. +func (EventType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{9} +} + +type LostEventSource int32 + +const ( + LostEventSource_UNKNOWN_LOST_EVENT_SOURCE LostEventSource = 0 + // PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF + // perf event ring buffer, indicating that userspace agent did not keep up + // with the events produced by the datapath. + LostEventSource_PERF_EVENT_RING_BUFFER LostEventSource = 1 + // OBSERVER_EVENTS_QUEUE indicates that events were dropped because the + // Hubble events queue was full, indicating that the Hubble observer did + // not keep up. 
+ LostEventSource_OBSERVER_EVENTS_QUEUE LostEventSource = 2 + // HUBBLE_RING_BUFFER indicates that the event was dropped because it could + // not be read from Hubble's ring buffer in time before being overwritten. + LostEventSource_HUBBLE_RING_BUFFER LostEventSource = 3 +) + +// Enum value maps for LostEventSource. +var ( + LostEventSource_name = map[int32]string{ + 0: "UNKNOWN_LOST_EVENT_SOURCE", + 1: "PERF_EVENT_RING_BUFFER", + 2: "OBSERVER_EVENTS_QUEUE", + 3: "HUBBLE_RING_BUFFER", + } + LostEventSource_value = map[string]int32{ + "UNKNOWN_LOST_EVENT_SOURCE": 0, + "PERF_EVENT_RING_BUFFER": 1, + "OBSERVER_EVENTS_QUEUE": 2, + "HUBBLE_RING_BUFFER": 3, + } +) + +func (x LostEventSource) Enum() *LostEventSource { + p := new(LostEventSource) + *p = x + return p +} + +func (x LostEventSource) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LostEventSource) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[10].Descriptor() +} + +func (LostEventSource) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[10] +} + +func (x LostEventSource) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LostEventSource.Descriptor instead. +func (LostEventSource) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{10} +} + +// AgentEventType is the type of agent event. These values are shared with type +// AgentNotification in pkg/monitor/api/types.go. 
+type AgentEventType int32 + +const ( + AgentEventType_AGENT_EVENT_UNKNOWN AgentEventType = 0 + AgentEventType_AGENT_STARTED AgentEventType = 2 + AgentEventType_POLICY_UPDATED AgentEventType = 3 + AgentEventType_POLICY_DELETED AgentEventType = 4 + AgentEventType_ENDPOINT_REGENERATE_SUCCESS AgentEventType = 5 + AgentEventType_ENDPOINT_REGENERATE_FAILURE AgentEventType = 6 + AgentEventType_ENDPOINT_CREATED AgentEventType = 7 + AgentEventType_ENDPOINT_DELETED AgentEventType = 8 + AgentEventType_IPCACHE_UPSERTED AgentEventType = 9 + AgentEventType_IPCACHE_DELETED AgentEventType = 10 + AgentEventType_SERVICE_UPSERTED AgentEventType = 11 + AgentEventType_SERVICE_DELETED AgentEventType = 12 +) + +// Enum value maps for AgentEventType. +var ( + AgentEventType_name = map[int32]string{ + 0: "AGENT_EVENT_UNKNOWN", + 2: "AGENT_STARTED", + 3: "POLICY_UPDATED", + 4: "POLICY_DELETED", + 5: "ENDPOINT_REGENERATE_SUCCESS", + 6: "ENDPOINT_REGENERATE_FAILURE", + 7: "ENDPOINT_CREATED", + 8: "ENDPOINT_DELETED", + 9: "IPCACHE_UPSERTED", + 10: "IPCACHE_DELETED", + 11: "SERVICE_UPSERTED", + 12: "SERVICE_DELETED", + } + AgentEventType_value = map[string]int32{ + "AGENT_EVENT_UNKNOWN": 0, + "AGENT_STARTED": 2, + "POLICY_UPDATED": 3, + "POLICY_DELETED": 4, + "ENDPOINT_REGENERATE_SUCCESS": 5, + "ENDPOINT_REGENERATE_FAILURE": 6, + "ENDPOINT_CREATED": 7, + "ENDPOINT_DELETED": 8, + "IPCACHE_UPSERTED": 9, + "IPCACHE_DELETED": 10, + "SERVICE_UPSERTED": 11, + "SERVICE_DELETED": 12, + } +) + +func (x AgentEventType) Enum() *AgentEventType { + p := new(AgentEventType) + *p = x + return p +} + +func (x AgentEventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AgentEventType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[11].Descriptor() +} + +func (AgentEventType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[11] +} + +func (x AgentEventType) Number() protoreflect.EnumNumber 
{ + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AgentEventType.Descriptor instead. +func (AgentEventType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{11} +} + +// This mirrors enum xlate_point in bpf/lib/trace_sock.h +type SocketTranslationPoint int32 + +const ( + SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN SocketTranslationPoint = 0 + SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_FWD SocketTranslationPoint = 1 // Pre service translation + SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_FWD SocketTranslationPoint = 2 // Post service translation + SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_REV SocketTranslationPoint = 3 // Pre reverse service translation + SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_REV SocketTranslationPoint = 4 // Post reverse service translation +) + +// Enum value maps for SocketTranslationPoint. +var ( + SocketTranslationPoint_name = map[int32]string{ + 0: "SOCK_XLATE_POINT_UNKNOWN", + 1: "SOCK_XLATE_POINT_PRE_DIRECTION_FWD", + 2: "SOCK_XLATE_POINT_POST_DIRECTION_FWD", + 3: "SOCK_XLATE_POINT_PRE_DIRECTION_REV", + 4: "SOCK_XLATE_POINT_POST_DIRECTION_REV", + } + SocketTranslationPoint_value = map[string]int32{ + "SOCK_XLATE_POINT_UNKNOWN": 0, + "SOCK_XLATE_POINT_PRE_DIRECTION_FWD": 1, + "SOCK_XLATE_POINT_POST_DIRECTION_FWD": 2, + "SOCK_XLATE_POINT_PRE_DIRECTION_REV": 3, + "SOCK_XLATE_POINT_POST_DIRECTION_REV": 4, + } +) + +func (x SocketTranslationPoint) Enum() *SocketTranslationPoint { + p := new(SocketTranslationPoint) + *p = x + return p +} + +func (x SocketTranslationPoint) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SocketTranslationPoint) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[12].Descriptor() +} + +func (SocketTranslationPoint) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[12] +} + +func (x 
SocketTranslationPoint) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SocketTranslationPoint.Descriptor instead. +func (SocketTranslationPoint) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{12} +} + +// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. +type DebugEventType int32 + +const ( + DebugEventType_DBG_EVENT_UNKNOWN DebugEventType = 0 + DebugEventType_DBG_GENERIC DebugEventType = 1 + DebugEventType_DBG_LOCAL_DELIVERY DebugEventType = 2 + DebugEventType_DBG_ENCAP DebugEventType = 3 + DebugEventType_DBG_LXC_FOUND DebugEventType = 4 + DebugEventType_DBG_POLICY_DENIED DebugEventType = 5 + DebugEventType_DBG_CT_LOOKUP DebugEventType = 6 + DebugEventType_DBG_CT_LOOKUP_REV DebugEventType = 7 + DebugEventType_DBG_CT_MATCH DebugEventType = 8 + DebugEventType_DBG_CT_CREATED DebugEventType = 9 + DebugEventType_DBG_CT_CREATED2 DebugEventType = 10 + DebugEventType_DBG_ICMP6_HANDLE DebugEventType = 11 + DebugEventType_DBG_ICMP6_REQUEST DebugEventType = 12 + DebugEventType_DBG_ICMP6_NS DebugEventType = 13 + DebugEventType_DBG_ICMP6_TIME_EXCEEDED DebugEventType = 14 + DebugEventType_DBG_CT_VERDICT DebugEventType = 15 + DebugEventType_DBG_DECAP DebugEventType = 16 + DebugEventType_DBG_PORT_MAP DebugEventType = 17 + DebugEventType_DBG_ERROR_RET DebugEventType = 18 + DebugEventType_DBG_TO_HOST DebugEventType = 19 + DebugEventType_DBG_TO_STACK DebugEventType = 20 + DebugEventType_DBG_PKT_HASH DebugEventType = 21 + DebugEventType_DBG_LB6_LOOKUP_FRONTEND DebugEventType = 22 + DebugEventType_DBG_LB6_LOOKUP_FRONTEND_FAIL DebugEventType = 23 + DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT DebugEventType = 24 + DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 25 + DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 26 + DebugEventType_DBG_LB6_LOOKUP_BACKEND_FAIL DebugEventType = 27 + DebugEventType_DBG_LB6_REVERSE_NAT_LOOKUP 
DebugEventType = 28 + DebugEventType_DBG_LB6_REVERSE_NAT DebugEventType = 29 + DebugEventType_DBG_LB4_LOOKUP_FRONTEND DebugEventType = 30 + DebugEventType_DBG_LB4_LOOKUP_FRONTEND_FAIL DebugEventType = 31 + DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT DebugEventType = 32 + DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 33 + DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 34 + DebugEventType_DBG_LB4_LOOKUP_BACKEND_FAIL DebugEventType = 35 + DebugEventType_DBG_LB4_REVERSE_NAT_LOOKUP DebugEventType = 36 + DebugEventType_DBG_LB4_REVERSE_NAT DebugEventType = 37 + DebugEventType_DBG_LB4_LOOPBACK_SNAT DebugEventType = 38 + DebugEventType_DBG_LB4_LOOPBACK_SNAT_REV DebugEventType = 39 + DebugEventType_DBG_CT_LOOKUP4 DebugEventType = 40 + DebugEventType_DBG_RR_BACKEND_SLOT_SEL DebugEventType = 41 + DebugEventType_DBG_REV_PROXY_LOOKUP DebugEventType = 42 + DebugEventType_DBG_REV_PROXY_FOUND DebugEventType = 43 + DebugEventType_DBG_REV_PROXY_UPDATE DebugEventType = 44 + DebugEventType_DBG_L4_POLICY DebugEventType = 45 + DebugEventType_DBG_NETDEV_IN_CLUSTER DebugEventType = 46 + DebugEventType_DBG_NETDEV_ENCAP4 DebugEventType = 47 + DebugEventType_DBG_CT_LOOKUP4_1 DebugEventType = 48 + DebugEventType_DBG_CT_LOOKUP4_2 DebugEventType = 49 + DebugEventType_DBG_CT_CREATED4 DebugEventType = 50 + DebugEventType_DBG_CT_LOOKUP6_1 DebugEventType = 51 + DebugEventType_DBG_CT_LOOKUP6_2 DebugEventType = 52 + DebugEventType_DBG_CT_CREATED6 DebugEventType = 53 + DebugEventType_DBG_SKIP_PROXY DebugEventType = 54 + DebugEventType_DBG_L4_CREATE DebugEventType = 55 + DebugEventType_DBG_IP_ID_MAP_FAILED4 DebugEventType = 56 + DebugEventType_DBG_IP_ID_MAP_FAILED6 DebugEventType = 57 + DebugEventType_DBG_IP_ID_MAP_SUCCEED4 DebugEventType = 58 + DebugEventType_DBG_IP_ID_MAP_SUCCEED6 DebugEventType = 59 + DebugEventType_DBG_LB_STALE_CT DebugEventType = 60 + DebugEventType_DBG_INHERIT_IDENTITY DebugEventType = 61 + DebugEventType_DBG_SK_LOOKUP4 DebugEventType = 62 
+ DebugEventType_DBG_SK_LOOKUP6 DebugEventType = 63 + DebugEventType_DBG_SK_ASSIGN DebugEventType = 64 + DebugEventType_DBG_L7_LB DebugEventType = 65 + DebugEventType_DBG_SKIP_POLICY DebugEventType = 66 +) + +// Enum value maps for DebugEventType. +var ( + DebugEventType_name = map[int32]string{ + 0: "DBG_EVENT_UNKNOWN", + 1: "DBG_GENERIC", + 2: "DBG_LOCAL_DELIVERY", + 3: "DBG_ENCAP", + 4: "DBG_LXC_FOUND", + 5: "DBG_POLICY_DENIED", + 6: "DBG_CT_LOOKUP", + 7: "DBG_CT_LOOKUP_REV", + 8: "DBG_CT_MATCH", + 9: "DBG_CT_CREATED", + 10: "DBG_CT_CREATED2", + 11: "DBG_ICMP6_HANDLE", + 12: "DBG_ICMP6_REQUEST", + 13: "DBG_ICMP6_NS", + 14: "DBG_ICMP6_TIME_EXCEEDED", + 15: "DBG_CT_VERDICT", + 16: "DBG_DECAP", + 17: "DBG_PORT_MAP", + 18: "DBG_ERROR_RET", + 19: "DBG_TO_HOST", + 20: "DBG_TO_STACK", + 21: "DBG_PKT_HASH", + 22: "DBG_LB6_LOOKUP_FRONTEND", + 23: "DBG_LB6_LOOKUP_FRONTEND_FAIL", + 24: "DBG_LB6_LOOKUP_BACKEND_SLOT", + 25: "DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS", + 26: "DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL", + 27: "DBG_LB6_LOOKUP_BACKEND_FAIL", + 28: "DBG_LB6_REVERSE_NAT_LOOKUP", + 29: "DBG_LB6_REVERSE_NAT", + 30: "DBG_LB4_LOOKUP_FRONTEND", + 31: "DBG_LB4_LOOKUP_FRONTEND_FAIL", + 32: "DBG_LB4_LOOKUP_BACKEND_SLOT", + 33: "DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS", + 34: "DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL", + 35: "DBG_LB4_LOOKUP_BACKEND_FAIL", + 36: "DBG_LB4_REVERSE_NAT_LOOKUP", + 37: "DBG_LB4_REVERSE_NAT", + 38: "DBG_LB4_LOOPBACK_SNAT", + 39: "DBG_LB4_LOOPBACK_SNAT_REV", + 40: "DBG_CT_LOOKUP4", + 41: "DBG_RR_BACKEND_SLOT_SEL", + 42: "DBG_REV_PROXY_LOOKUP", + 43: "DBG_REV_PROXY_FOUND", + 44: "DBG_REV_PROXY_UPDATE", + 45: "DBG_L4_POLICY", + 46: "DBG_NETDEV_IN_CLUSTER", + 47: "DBG_NETDEV_ENCAP4", + 48: "DBG_CT_LOOKUP4_1", + 49: "DBG_CT_LOOKUP4_2", + 50: "DBG_CT_CREATED4", + 51: "DBG_CT_LOOKUP6_1", + 52: "DBG_CT_LOOKUP6_2", + 53: "DBG_CT_CREATED6", + 54: "DBG_SKIP_PROXY", + 55: "DBG_L4_CREATE", + 56: "DBG_IP_ID_MAP_FAILED4", + 57: "DBG_IP_ID_MAP_FAILED6", + 58: 
"DBG_IP_ID_MAP_SUCCEED4", + 59: "DBG_IP_ID_MAP_SUCCEED6", + 60: "DBG_LB_STALE_CT", + 61: "DBG_INHERIT_IDENTITY", + 62: "DBG_SK_LOOKUP4", + 63: "DBG_SK_LOOKUP6", + 64: "DBG_SK_ASSIGN", + 65: "DBG_L7_LB", + 66: "DBG_SKIP_POLICY", + } + DebugEventType_value = map[string]int32{ + "DBG_EVENT_UNKNOWN": 0, + "DBG_GENERIC": 1, + "DBG_LOCAL_DELIVERY": 2, + "DBG_ENCAP": 3, + "DBG_LXC_FOUND": 4, + "DBG_POLICY_DENIED": 5, + "DBG_CT_LOOKUP": 6, + "DBG_CT_LOOKUP_REV": 7, + "DBG_CT_MATCH": 8, + "DBG_CT_CREATED": 9, + "DBG_CT_CREATED2": 10, + "DBG_ICMP6_HANDLE": 11, + "DBG_ICMP6_REQUEST": 12, + "DBG_ICMP6_NS": 13, + "DBG_ICMP6_TIME_EXCEEDED": 14, + "DBG_CT_VERDICT": 15, + "DBG_DECAP": 16, + "DBG_PORT_MAP": 17, + "DBG_ERROR_RET": 18, + "DBG_TO_HOST": 19, + "DBG_TO_STACK": 20, + "DBG_PKT_HASH": 21, + "DBG_LB6_LOOKUP_FRONTEND": 22, + "DBG_LB6_LOOKUP_FRONTEND_FAIL": 23, + "DBG_LB6_LOOKUP_BACKEND_SLOT": 24, + "DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS": 25, + "DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL": 26, + "DBG_LB6_LOOKUP_BACKEND_FAIL": 27, + "DBG_LB6_REVERSE_NAT_LOOKUP": 28, + "DBG_LB6_REVERSE_NAT": 29, + "DBG_LB4_LOOKUP_FRONTEND": 30, + "DBG_LB4_LOOKUP_FRONTEND_FAIL": 31, + "DBG_LB4_LOOKUP_BACKEND_SLOT": 32, + "DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS": 33, + "DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL": 34, + "DBG_LB4_LOOKUP_BACKEND_FAIL": 35, + "DBG_LB4_REVERSE_NAT_LOOKUP": 36, + "DBG_LB4_REVERSE_NAT": 37, + "DBG_LB4_LOOPBACK_SNAT": 38, + "DBG_LB4_LOOPBACK_SNAT_REV": 39, + "DBG_CT_LOOKUP4": 40, + "DBG_RR_BACKEND_SLOT_SEL": 41, + "DBG_REV_PROXY_LOOKUP": 42, + "DBG_REV_PROXY_FOUND": 43, + "DBG_REV_PROXY_UPDATE": 44, + "DBG_L4_POLICY": 45, + "DBG_NETDEV_IN_CLUSTER": 46, + "DBG_NETDEV_ENCAP4": 47, + "DBG_CT_LOOKUP4_1": 48, + "DBG_CT_LOOKUP4_2": 49, + "DBG_CT_CREATED4": 50, + "DBG_CT_LOOKUP6_1": 51, + "DBG_CT_LOOKUP6_2": 52, + "DBG_CT_CREATED6": 53, + "DBG_SKIP_PROXY": 54, + "DBG_L4_CREATE": 55, + "DBG_IP_ID_MAP_FAILED4": 56, + "DBG_IP_ID_MAP_FAILED6": 57, + "DBG_IP_ID_MAP_SUCCEED4": 58, + 
"DBG_IP_ID_MAP_SUCCEED6": 59, + "DBG_LB_STALE_CT": 60, + "DBG_INHERIT_IDENTITY": 61, + "DBG_SK_LOOKUP4": 62, + "DBG_SK_LOOKUP6": 63, + "DBG_SK_ASSIGN": 64, + "DBG_L7_LB": 65, + "DBG_SKIP_POLICY": 66, + } +) + +func (x DebugEventType) Enum() *DebugEventType { + p := new(DebugEventType) + *p = x + return p +} + +func (x DebugEventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DebugEventType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[13].Descriptor() +} + +func (DebugEventType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[13] +} + +func (x DebugEventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DebugEventType.Descriptor instead. +func (DebugEventType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{13} +} + +type Flow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // uuid is a universally unique identifier for this flow. + Uuid string `protobuf:"bytes,34,opt,name=uuid,proto3" json:"uuid,omitempty"` + Verdict Verdict `protobuf:"varint,2,opt,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"` + // only applicable to Verdict = DROPPED. + // deprecated in favor of drop_reason_desc. + // + // Deprecated: Marked as deprecated in flow/flow.proto. + DropReason uint32 `protobuf:"varint,3,opt,name=drop_reason,json=dropReason,proto3" json:"drop_reason,omitempty"` + // auth_type is the authentication type specified for the flow in Cilium Network Policy. + // Only set on policy verdict events. 
+ AuthType AuthType `protobuf:"varint,35,opt,name=auth_type,json=authType,proto3,enum=flow.AuthType" json:"auth_type,omitempty"` + // l2 + Ethernet *Ethernet `protobuf:"bytes,4,opt,name=ethernet,proto3" json:"ethernet,omitempty"` + // l3 + IP *IP `protobuf:"bytes,5,opt,name=IP,proto3" json:"IP,omitempty"` + // l4 + L4 *Layer4 `protobuf:"bytes,6,opt,name=l4,proto3" json:"l4,omitempty"` + Source *Endpoint `protobuf:"bytes,8,opt,name=source,proto3" json:"source,omitempty"` + Destination *Endpoint `protobuf:"bytes,9,opt,name=destination,proto3" json:"destination,omitempty"` + Type FlowType `protobuf:"varint,10,opt,name=Type,proto3,enum=flow.FlowType" json:"Type,omitempty"` + // NodeName is the name of the node from which this Flow was captured. + NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // all names the source IP can have. + SourceNames []string `protobuf:"bytes,13,rep,name=source_names,json=sourceNames,proto3" json:"source_names,omitempty"` + // all names the destination IP can have. + DestinationNames []string `protobuf:"bytes,14,rep,name=destination_names,json=destinationNames,proto3" json:"destination_names,omitempty"` + // L7 information. This field is set if and only if FlowType is L7. + L7 *Layer7 `protobuf:"bytes,15,opt,name=l7,proto3" json:"l7,omitempty"` + // Deprecated. This suffers from false negatives due to protobuf not being + // able to distinguish between the value being false or it being absent. + // Please use is_reply instead. + // + // Deprecated: Marked as deprecated in flow/flow.proto. 
+ Reply bool `protobuf:"varint,16,opt,name=reply,proto3" json:"reply,omitempty"` + // EventType of the originating Cilium event + EventType *CiliumEventType `protobuf:"bytes,19,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + // source_service contains the service name of the source + SourceService *Service `protobuf:"bytes,20,opt,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"` + // destination_service contains the service name of the destination + DestinationService *Service `protobuf:"bytes,21,opt,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"` + // traffic_direction of the connection, e.g. ingress or egress + TrafficDirection TrafficDirection `protobuf:"varint,22,opt,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"` + // policy_match_type is only applicable to the cilium event type PolicyVerdict + // https://github.com/cilium/cilium/blob/e831859b5cc336c6d964a6d35bbd34d1840e21b9/pkg/monitor/datapath_policy.go#L50 + PolicyMatchType uint32 `protobuf:"varint,23,opt,name=policy_match_type,json=policyMatchType,proto3" json:"policy_match_type,omitempty"` + // Only applicable to cilium trace notifications, blank for other types. + TraceObservationPoint TraceObservationPoint `protobuf:"varint,24,opt,name=trace_observation_point,json=traceObservationPoint,proto3,enum=flow.TraceObservationPoint" json:"trace_observation_point,omitempty"` + // only applicable to Verdict = DROPPED. + DropReasonDesc DropReason `protobuf:"varint,25,opt,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"` + // is_reply indicates that this was a packet (L4) or message (L7) in the + // reply direction. May be absent (in which case it is unknown whether it + // is a reply or not). 
+ IsReply *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=is_reply,json=isReply,proto3" json:"is_reply,omitempty"` + // Only applicable to cilium debug capture events, blank for other types + DebugCapturePoint DebugCapturePoint `protobuf:"varint,27,opt,name=debug_capture_point,json=debugCapturePoint,proto3,enum=flow.DebugCapturePoint" json:"debug_capture_point,omitempty"` + // interface is the network interface on which this flow was observed + Interface *NetworkInterface `protobuf:"bytes,28,opt,name=interface,proto3" json:"interface,omitempty"` + // proxy_port indicates the port of the proxy to which the flow was forwarded + ProxyPort uint32 `protobuf:"varint,29,opt,name=proxy_port,json=proxyPort,proto3" json:"proxy_port,omitempty"` + // trace_context contains information about a trace related to the flow, if + // any. + TraceContext *TraceContext `protobuf:"bytes,30,opt,name=trace_context,json=traceContext,proto3" json:"trace_context,omitempty"` + // sock_xlate_point is the socket translation point. + // Only applicable to TraceSock notifications, blank for other types + SockXlatePoint SocketTranslationPoint `protobuf:"varint,31,opt,name=sock_xlate_point,json=sockXlatePoint,proto3,enum=flow.SocketTranslationPoint" json:"sock_xlate_point,omitempty"` + // socket_cookie is the Linux kernel socket cookie for this flow. + // Only applicable to TraceSock notifications, zero for other types + SocketCookie uint64 `protobuf:"varint,32,opt,name=socket_cookie,json=socketCookie,proto3" json:"socket_cookie,omitempty"` + // cgroup_id of the process which emitted this event. + // Only applicable to TraceSock notifications, zero for other types + CgroupId uint64 `protobuf:"varint,33,opt,name=cgroup_id,json=cgroupId,proto3" json:"cgroup_id,omitempty"` + // This is a temporary workaround to support summary field for pb.Flow without + // duplicating logic from the old parser. This field will be removed once we + // fully migrate to the new parser. 
+ // + // Deprecated: Marked as deprecated in flow/flow.proto. + Summary string `protobuf:"bytes,100000,opt,name=Summary,proto3" json:"Summary,omitempty"` + // extensions can be used to add arbitrary additional metadata to flows. + // This can be used to extend functionality for other Hubble compatible + // APIs, or experiment with new functionality without needing to change the public API. + Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"` + // The CiliumNetworkPolicies allowing the egress of the flow. + EgressAllowedBy []*Policy `protobuf:"bytes,21001,rep,name=egress_allowed_by,json=egressAllowedBy,proto3" json:"egress_allowed_by,omitempty"` + // The CiliumNetworkPolicies allowing the ingress of the flow. + IngressAllowedBy []*Policy `protobuf:"bytes,21002,rep,name=ingress_allowed_by,json=ingressAllowedBy,proto3" json:"ingress_allowed_by,omitempty"` +} + +func (x *Flow) Reset() { + *x = Flow{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Flow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Flow) ProtoMessage() {} + +func (x *Flow) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Flow.ProtoReflect.Descriptor instead. 
+func (*Flow) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{0} +} + +func (x *Flow) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *Flow) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *Flow) GetVerdict() Verdict { + if x != nil { + return x.Verdict + } + return Verdict_VERDICT_UNKNOWN +} + +// Deprecated: Marked as deprecated in flow/flow.proto. +func (x *Flow) GetDropReason() uint32 { + if x != nil { + return x.DropReason + } + return 0 +} + +func (x *Flow) GetAuthType() AuthType { + if x != nil { + return x.AuthType + } + return AuthType_DISABLED +} + +func (x *Flow) GetEthernet() *Ethernet { + if x != nil { + return x.Ethernet + } + return nil +} + +func (x *Flow) GetIP() *IP { + if x != nil { + return x.IP + } + return nil +} + +func (x *Flow) GetL4() *Layer4 { + if x != nil { + return x.L4 + } + return nil +} + +func (x *Flow) GetSource() *Endpoint { + if x != nil { + return x.Source + } + return nil +} + +func (x *Flow) GetDestination() *Endpoint { + if x != nil { + return x.Destination + } + return nil +} + +func (x *Flow) GetType() FlowType { + if x != nil { + return x.Type + } + return FlowType_UNKNOWN_TYPE +} + +func (x *Flow) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +func (x *Flow) GetSourceNames() []string { + if x != nil { + return x.SourceNames + } + return nil +} + +func (x *Flow) GetDestinationNames() []string { + if x != nil { + return x.DestinationNames + } + return nil +} + +func (x *Flow) GetL7() *Layer7 { + if x != nil { + return x.L7 + } + return nil +} + +// Deprecated: Marked as deprecated in flow/flow.proto. 
+func (x *Flow) GetReply() bool { + if x != nil { + return x.Reply + } + return false +} + +func (x *Flow) GetEventType() *CiliumEventType { + if x != nil { + return x.EventType + } + return nil +} + +func (x *Flow) GetSourceService() *Service { + if x != nil { + return x.SourceService + } + return nil +} + +func (x *Flow) GetDestinationService() *Service { + if x != nil { + return x.DestinationService + } + return nil +} + +func (x *Flow) GetTrafficDirection() TrafficDirection { + if x != nil { + return x.TrafficDirection + } + return TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN +} + +func (x *Flow) GetPolicyMatchType() uint32 { + if x != nil { + return x.PolicyMatchType + } + return 0 +} + +func (x *Flow) GetTraceObservationPoint() TraceObservationPoint { + if x != nil { + return x.TraceObservationPoint + } + return TraceObservationPoint_UNKNOWN_POINT +} + +func (x *Flow) GetDropReasonDesc() DropReason { + if x != nil { + return x.DropReasonDesc + } + return DropReason_DROP_REASON_UNKNOWN +} + +func (x *Flow) GetIsReply() *wrapperspb.BoolValue { + if x != nil { + return x.IsReply + } + return nil +} + +func (x *Flow) GetDebugCapturePoint() DebugCapturePoint { + if x != nil { + return x.DebugCapturePoint + } + return DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN +} + +func (x *Flow) GetInterface() *NetworkInterface { + if x != nil { + return x.Interface + } + return nil +} + +func (x *Flow) GetProxyPort() uint32 { + if x != nil { + return x.ProxyPort + } + return 0 +} + +func (x *Flow) GetTraceContext() *TraceContext { + if x != nil { + return x.TraceContext + } + return nil +} + +func (x *Flow) GetSockXlatePoint() SocketTranslationPoint { + if x != nil { + return x.SockXlatePoint + } + return SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN +} + +func (x *Flow) GetSocketCookie() uint64 { + if x != nil { + return x.SocketCookie + } + return 0 +} + +func (x *Flow) GetCgroupId() uint64 { + if x != nil { + return x.CgroupId + } + return 0 +} + +// Deprecated: Marked as 
deprecated in flow/flow.proto. +func (x *Flow) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Flow) GetExtensions() *anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Flow) GetEgressAllowedBy() []*Policy { + if x != nil { + return x.EgressAllowedBy + } + return nil +} + +func (x *Flow) GetIngressAllowedBy() []*Policy { + if x != nil { + return x.IngressAllowedBy + } + return nil +} + +type Layer4 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Protocol: + // + // *Layer4_TCP + // *Layer4_UDP + // *Layer4_ICMPv4 + // *Layer4_ICMPv6 + // *Layer4_SCTP + Protocol isLayer4_Protocol `protobuf_oneof:"protocol"` +} + +func (x *Layer4) Reset() { + *x = Layer4{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Layer4) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Layer4) ProtoMessage() {} + +func (x *Layer4) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Layer4.ProtoReflect.Descriptor instead. 
+func (*Layer4) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{1} +} + +func (m *Layer4) GetProtocol() isLayer4_Protocol { + if m != nil { + return m.Protocol + } + return nil +} + +func (x *Layer4) GetTCP() *TCP { + if x, ok := x.GetProtocol().(*Layer4_TCP); ok { + return x.TCP + } + return nil +} + +func (x *Layer4) GetUDP() *UDP { + if x, ok := x.GetProtocol().(*Layer4_UDP); ok { + return x.UDP + } + return nil +} + +func (x *Layer4) GetICMPv4() *ICMPv4 { + if x, ok := x.GetProtocol().(*Layer4_ICMPv4); ok { + return x.ICMPv4 + } + return nil +} + +func (x *Layer4) GetICMPv6() *ICMPv6 { + if x, ok := x.GetProtocol().(*Layer4_ICMPv6); ok { + return x.ICMPv6 + } + return nil +} + +func (x *Layer4) GetSCTP() *SCTP { + if x, ok := x.GetProtocol().(*Layer4_SCTP); ok { + return x.SCTP + } + return nil +} + +type isLayer4_Protocol interface { + isLayer4_Protocol() +} + +type Layer4_TCP struct { + TCP *TCP `protobuf:"bytes,1,opt,name=TCP,proto3,oneof"` +} + +type Layer4_UDP struct { + UDP *UDP `protobuf:"bytes,2,opt,name=UDP,proto3,oneof"` +} + +type Layer4_ICMPv4 struct { + // ICMP is technically not L4, but mutually exclusive with the above + ICMPv4 *ICMPv4 `protobuf:"bytes,3,opt,name=ICMPv4,proto3,oneof"` +} + +type Layer4_ICMPv6 struct { + ICMPv6 *ICMPv6 `protobuf:"bytes,4,opt,name=ICMPv6,proto3,oneof"` +} + +type Layer4_SCTP struct { + SCTP *SCTP `protobuf:"bytes,5,opt,name=SCTP,proto3,oneof"` +} + +func (*Layer4_TCP) isLayer4_Protocol() {} + +func (*Layer4_UDP) isLayer4_Protocol() {} + +func (*Layer4_ICMPv4) isLayer4_Protocol() {} + +func (*Layer4_ICMPv6) isLayer4_Protocol() {} + +func (*Layer4_SCTP) isLayer4_Protocol() {} + +// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141): +type Layer7 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + Type L7FlowType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.L7FlowType" json:"type,omitempty"` + // Latency of the response + LatencyNs uint64 `protobuf:"varint,2,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"` + // L7 field. This field is set if and only if FlowType is L7. + // + // Types that are assignable to Record: + // + // *Layer7_Dns + // *Layer7_Http + // *Layer7_Kafka + Record isLayer7_Record `protobuf_oneof:"record"` +} + +func (x *Layer7) Reset() { + *x = Layer7{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Layer7) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Layer7) ProtoMessage() {} + +func (x *Layer7) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Layer7.ProtoReflect.Descriptor instead. 
+func (*Layer7) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{2} +} + +func (x *Layer7) GetType() L7FlowType { + if x != nil { + return x.Type + } + return L7FlowType_UNKNOWN_L7_TYPE +} + +func (x *Layer7) GetLatencyNs() uint64 { + if x != nil { + return x.LatencyNs + } + return 0 +} + +func (m *Layer7) GetRecord() isLayer7_Record { + if m != nil { + return m.Record + } + return nil +} + +func (x *Layer7) GetDns() *DNS { + if x, ok := x.GetRecord().(*Layer7_Dns); ok { + return x.Dns + } + return nil +} + +func (x *Layer7) GetHttp() *HTTP { + if x, ok := x.GetRecord().(*Layer7_Http); ok { + return x.Http + } + return nil +} + +func (x *Layer7) GetKafka() *Kafka { + if x, ok := x.GetRecord().(*Layer7_Kafka); ok { + return x.Kafka + } + return nil +} + +type isLayer7_Record interface { + isLayer7_Record() +} + +type Layer7_Dns struct { + Dns *DNS `protobuf:"bytes,100,opt,name=dns,proto3,oneof"` +} + +type Layer7_Http struct { + Http *HTTP `protobuf:"bytes,101,opt,name=http,proto3,oneof"` +} + +type Layer7_Kafka struct { + Kafka *Kafka `protobuf:"bytes,102,opt,name=kafka,proto3,oneof"` +} + +func (*Layer7_Dns) isLayer7_Record() {} + +func (*Layer7_Http) isLayer7_Record() {} + +func (*Layer7_Kafka) isLayer7_Record() {} + +// TraceContext contains trace context propagation data, i.e. information about a +// distributed trace. +// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/). +type TraceContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parent identifies the incoming request in a tracing system. 
+ Parent *TraceParent `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` +} + +func (x *TraceContext) Reset() { + *x = TraceContext{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceContext) ProtoMessage() {} + +func (x *TraceContext) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceContext.ProtoReflect.Descriptor instead. +func (*TraceContext) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{3} +} + +func (x *TraceContext) GetParent() *TraceParent { + if x != nil { + return x.Parent + } + return nil +} + +// TraceParent identifies the incoming request in a tracing system. +type TraceParent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // trace_id is a unique value that identifies a trace. It is a byte array + // represented as a hex string. 
+ TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` +} + +func (x *TraceParent) Reset() { + *x = TraceParent{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceParent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceParent) ProtoMessage() {} + +func (x *TraceParent) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceParent.ProtoReflect.Descriptor instead. +func (*TraceParent) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{4} +} + +func (x *TraceParent) GetTraceId() string { + if x != nil { + return x.TraceId + } + return "" +} + +type Endpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID uint32 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Identity uint32 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + // labels in `foo=bar` format. 
+ Labels []string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + PodName string `protobuf:"bytes,5,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + Workloads []*Workload `protobuf:"bytes,6,rep,name=workloads,proto3" json:"workloads,omitempty"` +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. +func (*Endpoint) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{5} +} + +func (x *Endpoint) GetID() uint32 { + if x != nil { + return x.ID + } + return 0 +} + +func (x *Endpoint) GetIdentity() uint32 { + if x != nil { + return x.Identity + } + return 0 +} + +func (x *Endpoint) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Endpoint) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Endpoint) GetPodName() string { + if x != nil { + return x.PodName + } + return "" +} + +func (x *Endpoint) GetWorkloads() []*Workload { + if x != nil { + return x.Workloads + } + return nil +} + +type Workload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` +} + +func (x 
*Workload) Reset() { + *x = Workload{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workload) ProtoMessage() {} + +func (x *Workload) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workload.ProtoReflect.Descriptor instead. +func (*Workload) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{6} +} + +func (x *Workload) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Workload) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +type TCP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"` + DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + Flags *TCPFlags `protobuf:"bytes,3,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *TCP) Reset() { + *x = TCP{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCP) ProtoMessage() {} + +func (x *TCP) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCP.ProtoReflect.Descriptor instead. +func (*TCP) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{7} +} + +func (x *TCP) GetSourcePort() uint32 { + if x != nil { + return x.SourcePort + } + return 0 +} + +func (x *TCP) GetDestinationPort() uint32 { + if x != nil { + return x.DestinationPort + } + return 0 +} + +func (x *TCP) GetFlags() *TCPFlags { + if x != nil { + return x.Flags + } + return nil +} + +type IP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` + IpVersion IPVersion `protobuf:"varint,3,opt,name=ipVersion,proto3,enum=flow.IPVersion" json:"ipVersion,omitempty"` + // This field indicates whether the TraceReasonEncryptMask is set or not. + // https://github.com/cilium/cilium/blob/ba0ed147bd5bb342f67b1794c2ad13c6e99d5236/pkg/monitor/datapath_trace.go#L27 + Encrypted bool `protobuf:"varint,4,opt,name=encrypted,proto3" json:"encrypted,omitempty"` +} + +func (x *IP) Reset() { + *x = IP{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IP) ProtoMessage() {} + +func (x *IP) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IP.ProtoReflect.Descriptor instead. 
+func (*IP) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{8} +} + +func (x *IP) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *IP) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +func (x *IP) GetIpVersion() IPVersion { + if x != nil { + return x.IpVersion + } + return IPVersion_IP_NOT_USED +} + +func (x *IP) GetEncrypted() bool { + if x != nil { + return x.Encrypted + } + return false +} + +type Ethernet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` +} + +func (x *Ethernet) Reset() { + *x = Ethernet{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Ethernet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Ethernet) ProtoMessage() {} + +func (x *Ethernet) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Ethernet.ProtoReflect.Descriptor instead. 
+func (*Ethernet) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{9} +} + +func (x *Ethernet) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *Ethernet) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +type TCPFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FIN bool `protobuf:"varint,1,opt,name=FIN,proto3" json:"FIN,omitempty"` + SYN bool `protobuf:"varint,2,opt,name=SYN,proto3" json:"SYN,omitempty"` + RST bool `protobuf:"varint,3,opt,name=RST,proto3" json:"RST,omitempty"` + PSH bool `protobuf:"varint,4,opt,name=PSH,proto3" json:"PSH,omitempty"` + ACK bool `protobuf:"varint,5,opt,name=ACK,proto3" json:"ACK,omitempty"` + URG bool `protobuf:"varint,6,opt,name=URG,proto3" json:"URG,omitempty"` + ECE bool `protobuf:"varint,7,opt,name=ECE,proto3" json:"ECE,omitempty"` + CWR bool `protobuf:"varint,8,opt,name=CWR,proto3" json:"CWR,omitempty"` + NS bool `protobuf:"varint,9,opt,name=NS,proto3" json:"NS,omitempty"` +} + +func (x *TCPFlags) Reset() { + *x = TCPFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCPFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCPFlags) ProtoMessage() {} + +func (x *TCPFlags) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCPFlags.ProtoReflect.Descriptor instead. 
+func (*TCPFlags) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{10} +} + +func (x *TCPFlags) GetFIN() bool { + if x != nil { + return x.FIN + } + return false +} + +func (x *TCPFlags) GetSYN() bool { + if x != nil { + return x.SYN + } + return false +} + +func (x *TCPFlags) GetRST() bool { + if x != nil { + return x.RST + } + return false +} + +func (x *TCPFlags) GetPSH() bool { + if x != nil { + return x.PSH + } + return false +} + +func (x *TCPFlags) GetACK() bool { + if x != nil { + return x.ACK + } + return false +} + +func (x *TCPFlags) GetURG() bool { + if x != nil { + return x.URG + } + return false +} + +func (x *TCPFlags) GetECE() bool { + if x != nil { + return x.ECE + } + return false +} + +func (x *TCPFlags) GetCWR() bool { + if x != nil { + return x.CWR + } + return false +} + +func (x *TCPFlags) GetNS() bool { + if x != nil { + return x.NS + } + return false +} + +type UDP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"` + DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` +} + +func (x *UDP) Reset() { + *x = UDP{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UDP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UDP) ProtoMessage() {} + +func (x *UDP) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UDP.ProtoReflect.Descriptor instead. 
+func (*UDP) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{11} +} + +func (x *UDP) GetSourcePort() uint32 { + if x != nil { + return x.SourcePort + } + return 0 +} + +func (x *UDP) GetDestinationPort() uint32 { + if x != nil { + return x.DestinationPort + } + return 0 +} + +type SCTP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourcePort uint32 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"` + DestinationPort uint32 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` +} + +func (x *SCTP) Reset() { + *x = SCTP{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SCTP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SCTP) ProtoMessage() {} + +func (x *SCTP) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SCTP.ProtoReflect.Descriptor instead. 
+func (*SCTP) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{12} +} + +func (x *SCTP) GetSourcePort() uint32 { + if x != nil { + return x.SourcePort + } + return 0 +} + +func (x *SCTP) GetDestinationPort() uint32 { + if x != nil { + return x.DestinationPort + } + return 0 +} + +type ICMPv4 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + Code uint32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *ICMPv4) Reset() { + *x = ICMPv4{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ICMPv4) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ICMPv4) ProtoMessage() {} + +func (x *ICMPv4) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ICMPv4.ProtoReflect.Descriptor instead. 
+func (*ICMPv4) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{13} +} + +func (x *ICMPv4) GetType() uint32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *ICMPv4) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +type ICMPv6 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + Code uint32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *ICMPv6) Reset() { + *x = ICMPv6{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ICMPv6) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ICMPv6) ProtoMessage() {} + +func (x *ICMPv6) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ICMPv6.ProtoReflect.Descriptor instead. 
+func (*ICMPv6) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{14} +} + +func (x *ICMPv6) GetType() uint32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *ICMPv6) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Labels []string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"` + Revision uint64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. 
+func (*Policy) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{15} +} + +func (x *Policy) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Policy) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Policy) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Policy) GetRevision() uint64 { + if x != nil { + return x.Revision + } + return 0 +} + +// EventTypeFilter is a filter describing a particular event type. +type EventTypeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the primary flow type as defined by: + // github.com/cilium/cilium/pkg/monitor/api.MessageType* + Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + // match_sub_type is set to true when matching on the sub_type should + // be done. This flag is required as 0 is a valid sub_type. + MatchSubType bool `protobuf:"varint,2,opt,name=match_sub_type,json=matchSubType,proto3" json:"match_sub_type,omitempty"` + // sub_type is the secondary type, e.g. 
+ // - github.com/cilium/cilium/pkg/monitor/api.Trace* + SubType int32 `protobuf:"varint,3,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"` +} + +func (x *EventTypeFilter) Reset() { + *x = EventTypeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventTypeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventTypeFilter) ProtoMessage() {} + +func (x *EventTypeFilter) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventTypeFilter.ProtoReflect.Descriptor instead. +func (*EventTypeFilter) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{16} +} + +func (x *EventTypeFilter) GetType() int32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *EventTypeFilter) GetMatchSubType() bool { + if x != nil { + return x.MatchSubType + } + return false +} + +func (x *EventTypeFilter) GetSubType() int32 { + if x != nil { + return x.SubType + } + return 0 +} + +// CiliumEventType from which the flow originated. +type CiliumEventType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type of event the flow originated from, i.e. + // github.com/cilium/cilium/pkg/monitor/api.MessageType* + Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + // sub_type may indicate more details depending on type, e.g. 
+ // - github.com/cilium/cilium/pkg/monitor/api.Trace* + // - github.com/cilium/cilium/pkg/monitor/api.Drop* + // - github.com/cilium/cilium/pkg/monitor/api.DbgCapture* + SubType int32 `protobuf:"varint,2,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"` +} + +func (x *CiliumEventType) Reset() { + *x = CiliumEventType{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CiliumEventType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CiliumEventType) ProtoMessage() {} + +func (x *CiliumEventType) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CiliumEventType.ProtoReflect.Descriptor instead. +func (*CiliumEventType) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{17} +} + +func (x *CiliumEventType) GetType() int32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *CiliumEventType) GetSubType() int32 { + if x != nil { + return x.SubType + } + return 0 +} + +// FlowFilter represent an individual flow filter. All fields are optional. If +// multiple fields are set, then all fields must match for the filter to match. +type FlowFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // uuid filters by a list of flow uuids. + Uuid []string `protobuf:"bytes,29,rep,name=uuid,proto3" json:"uuid,omitempty"` + // source_ip filters by a list of source ips. Each of the source ips can be + // specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g. + // "1.1.1.0/24"). 
+ SourceIp []string `protobuf:"bytes,1,rep,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"` + // source_pod filters by a list of source pod name prefixes, optionally + // within a given namespace (e.g. "xwing", "kube-system/coredns-"). + // The pod name can be omitted to only filter by namespace + // (e.g. "kube-system/") or the namespace can be omitted to filter for + // pods in any namespace (e.g. "/xwing") + SourcePod []string `protobuf:"bytes,2,rep,name=source_pod,json=sourcePod,proto3" json:"source_pod,omitempty"` + // source_fqdn filters by a list of source fully qualified domain names + SourceFqdn []string `protobuf:"bytes,7,rep,name=source_fqdn,json=sourceFqdn,proto3" json:"source_fqdn,omitempty"` + // source_labels filters on a list of source label selectors. Selectors + // support the full Kubernetes label selector syntax. + SourceLabel []string `protobuf:"bytes,10,rep,name=source_label,json=sourceLabel,proto3" json:"source_label,omitempty"` + // source_service filters on a list of source service names. This field + // supports the same syntax as the source_pod field. + SourceService []string `protobuf:"bytes,16,rep,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"` + // source_workload filters by a list of source workload. + SourceWorkload []*Workload `protobuf:"bytes,26,rep,name=source_workload,json=sourceWorkload,proto3" json:"source_workload,omitempty"` + // destination_ip filters by a list of destination ips. Each of the + // destination ips can be specified as an exact match (e.g. "1.1.1.1") or + // as a CIDR range (e.g. "1.1.1.0/24"). 
+ DestinationIp []string `protobuf:"bytes,3,rep,name=destination_ip,json=destinationIp,proto3" json:"destination_ip,omitempty"` + // destination_pod filters by a list of destination pod names + DestinationPod []string `protobuf:"bytes,4,rep,name=destination_pod,json=destinationPod,proto3" json:"destination_pod,omitempty"` + // destination_fqdn filters by a list of destination fully qualified domain names + DestinationFqdn []string `protobuf:"bytes,8,rep,name=destination_fqdn,json=destinationFqdn,proto3" json:"destination_fqdn,omitempty"` + // destination_label filters on a list of destination label selectors + DestinationLabel []string `protobuf:"bytes,11,rep,name=destination_label,json=destinationLabel,proto3" json:"destination_label,omitempty"` + // destination_service filters on a list of destination service names + DestinationService []string `protobuf:"bytes,17,rep,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"` + // destination_workload filters by a list of destination workload. + DestinationWorkload []*Workload `protobuf:"bytes,27,rep,name=destination_workload,json=destinationWorkload,proto3" json:"destination_workload,omitempty"` + // traffic_direction filters flow by direction of the connection, e.g. + // ingress or egress. + TrafficDirection []TrafficDirection `protobuf:"varint,30,rep,packed,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"` + // only return Flows that were classified with a particular verdict. + Verdict []Verdict `protobuf:"varint,5,rep,packed,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"` + // event_type is the list of event types to filter on + EventType []*EventTypeFilter `protobuf:"bytes,6,rep,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + // http_status_code is a list of string prefixes (e.g. 
"4+", "404", "5+") + // to filter on the HTTP status code + HttpStatusCode []string `protobuf:"bytes,9,rep,name=http_status_code,json=httpStatusCode,proto3" json:"http_status_code,omitempty"` + // protocol filters flows by L4 or L7 protocol, e.g. (e.g. "tcp", "http") + Protocol []string `protobuf:"bytes,12,rep,name=protocol,proto3" json:"protocol,omitempty"` + // source_port filters flows by L4 source port + SourcePort []string `protobuf:"bytes,13,rep,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"` + // destination_port filters flows by L4 destination port + DestinationPort []string `protobuf:"bytes,14,rep,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + // reply filters flows based on the direction of the flow. + Reply []bool `protobuf:"varint,15,rep,packed,name=reply,proto3" json:"reply,omitempty"` + // dns_query filters L7 DNS flows by query patterns (RE2 regex), e.g. 'kube.*local'. + DnsQuery []string `protobuf:"bytes,18,rep,name=dns_query,json=dnsQuery,proto3" json:"dns_query,omitempty"` + // source_identity filters by the security identity of the source endpoint. + SourceIdentity []uint32 `protobuf:"varint,19,rep,packed,name=source_identity,json=sourceIdentity,proto3" json:"source_identity,omitempty"` + // destination_identity filters by the security identity of the destination endpoint. + DestinationIdentity []uint32 `protobuf:"varint,20,rep,packed,name=destination_identity,json=destinationIdentity,proto3" json:"destination_identity,omitempty"` + // GET, POST, PUT, etc. methods. This type of field is well suited for an + // enum but every single existing place is using a string already. + HttpMethod []string `protobuf:"bytes,21,rep,name=http_method,json=httpMethod,proto3" json:"http_method,omitempty"` + // http_path is a list of regular expressions to filter on the HTTP path. 
+ HttpPath []string `protobuf:"bytes,22,rep,name=http_path,json=httpPath,proto3" json:"http_path,omitempty"` + // http_url is a list of regular expressions to filter on the HTTP URL. + HttpUrl []string `protobuf:"bytes,31,rep,name=http_url,json=httpUrl,proto3" json:"http_url,omitempty"` + // http_header is a list of key:value pairs to filter on the HTTP headers. + HttpHeader []*HTTPHeader `protobuf:"bytes,32,rep,name=http_header,json=httpHeader,proto3" json:"http_header,omitempty"` + // tcp_flags filters flows based on TCP header flags + TcpFlags []*TCPFlags `protobuf:"bytes,23,rep,name=tcp_flags,json=tcpFlags,proto3" json:"tcp_flags,omitempty"` + // node_name is a list of patterns to filter on the node name, e.g. "k8s*", + // "test-cluster/*.domain.com", "cluster-name/" etc. + NodeName []string `protobuf:"bytes,24,rep,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // filter based on IP version (ipv4 or ipv6) + IpVersion []IPVersion `protobuf:"varint,25,rep,packed,name=ip_version,json=ipVersion,proto3,enum=flow.IPVersion" json:"ip_version,omitempty"` + // trace_id filters flows by trace ID + TraceId []string `protobuf:"bytes,28,rep,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` +} + +func (x *FlowFilter) Reset() { + *x = FlowFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FlowFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FlowFilter) ProtoMessage() {} + +func (x *FlowFilter) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FlowFilter.ProtoReflect.Descriptor instead. 
+func (*FlowFilter) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{18} +} + +func (x *FlowFilter) GetUuid() []string { + if x != nil { + return x.Uuid + } + return nil +} + +func (x *FlowFilter) GetSourceIp() []string { + if x != nil { + return x.SourceIp + } + return nil +} + +func (x *FlowFilter) GetSourcePod() []string { + if x != nil { + return x.SourcePod + } + return nil +} + +func (x *FlowFilter) GetSourceFqdn() []string { + if x != nil { + return x.SourceFqdn + } + return nil +} + +func (x *FlowFilter) GetSourceLabel() []string { + if x != nil { + return x.SourceLabel + } + return nil +} + +func (x *FlowFilter) GetSourceService() []string { + if x != nil { + return x.SourceService + } + return nil +} + +func (x *FlowFilter) GetSourceWorkload() []*Workload { + if x != nil { + return x.SourceWorkload + } + return nil +} + +func (x *FlowFilter) GetDestinationIp() []string { + if x != nil { + return x.DestinationIp + } + return nil +} + +func (x *FlowFilter) GetDestinationPod() []string { + if x != nil { + return x.DestinationPod + } + return nil +} + +func (x *FlowFilter) GetDestinationFqdn() []string { + if x != nil { + return x.DestinationFqdn + } + return nil +} + +func (x *FlowFilter) GetDestinationLabel() []string { + if x != nil { + return x.DestinationLabel + } + return nil +} + +func (x *FlowFilter) GetDestinationService() []string { + if x != nil { + return x.DestinationService + } + return nil +} + +func (x *FlowFilter) GetDestinationWorkload() []*Workload { + if x != nil { + return x.DestinationWorkload + } + return nil +} + +func (x *FlowFilter) GetTrafficDirection() []TrafficDirection { + if x != nil { + return x.TrafficDirection + } + return nil +} + +func (x *FlowFilter) GetVerdict() []Verdict { + if x != nil { + return x.Verdict + } + return nil +} + +func (x *FlowFilter) GetEventType() []*EventTypeFilter { + if x != nil { + return x.EventType + } + return nil +} + +func (x *FlowFilter) GetHttpStatusCode() 
[]string { + if x != nil { + return x.HttpStatusCode + } + return nil +} + +func (x *FlowFilter) GetProtocol() []string { + if x != nil { + return x.Protocol + } + return nil +} + +func (x *FlowFilter) GetSourcePort() []string { + if x != nil { + return x.SourcePort + } + return nil +} + +func (x *FlowFilter) GetDestinationPort() []string { + if x != nil { + return x.DestinationPort + } + return nil +} + +func (x *FlowFilter) GetReply() []bool { + if x != nil { + return x.Reply + } + return nil +} + +func (x *FlowFilter) GetDnsQuery() []string { + if x != nil { + return x.DnsQuery + } + return nil +} + +func (x *FlowFilter) GetSourceIdentity() []uint32 { + if x != nil { + return x.SourceIdentity + } + return nil +} + +func (x *FlowFilter) GetDestinationIdentity() []uint32 { + if x != nil { + return x.DestinationIdentity + } + return nil +} + +func (x *FlowFilter) GetHttpMethod() []string { + if x != nil { + return x.HttpMethod + } + return nil +} + +func (x *FlowFilter) GetHttpPath() []string { + if x != nil { + return x.HttpPath + } + return nil +} + +func (x *FlowFilter) GetHttpUrl() []string { + if x != nil { + return x.HttpUrl + } + return nil +} + +func (x *FlowFilter) GetHttpHeader() []*HTTPHeader { + if x != nil { + return x.HttpHeader + } + return nil +} + +func (x *FlowFilter) GetTcpFlags() []*TCPFlags { + if x != nil { + return x.TcpFlags + } + return nil +} + +func (x *FlowFilter) GetNodeName() []string { + if x != nil { + return x.NodeName + } + return nil +} + +func (x *FlowFilter) GetIpVersion() []IPVersion { + if x != nil { + return x.IpVersion + } + return nil +} + +func (x *FlowFilter) GetTraceId() []string { + if x != nil { + return x.TraceId + } + return nil +} + +// DNS flow. 
This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264): +type DNS struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DNS name that's being looked up: e.g. "isovalent.com." + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // List of IP addresses in the DNS response. + Ips []string `protobuf:"bytes,2,rep,name=ips,proto3" json:"ips,omitempty"` + // TTL in the DNS response. + Ttl uint32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + // List of CNames in the DNS response. + Cnames []string `protobuf:"bytes,4,rep,name=cnames,proto3" json:"cnames,omitempty"` + // Corresponds to DNSDataSource defined in: + // + // https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L253 + ObservationSource string `protobuf:"bytes,5,opt,name=observation_source,json=observationSource,proto3" json:"observation_source,omitempty"` + // Return code of the DNS request defined in: + // + // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 + Rcode uint32 `protobuf:"varint,6,opt,name=rcode,proto3" json:"rcode,omitempty"` + // String representation of qtypes defined in: + // + // https://tools.ietf.org/html/rfc1035#section-3.2.3 + Qtypes []string `protobuf:"bytes,7,rep,name=qtypes,proto3" json:"qtypes,omitempty"` + // String representation of rrtypes defined in: + // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4 + Rrtypes []string `protobuf:"bytes,8,rep,name=rrtypes,proto3" json:"rrtypes,omitempty"` +} + +func (x *DNS) Reset() { + *x = DNS{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DNS) String() 
string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DNS) ProtoMessage() {} + +func (x *DNS) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DNS.ProtoReflect.Descriptor instead. +func (*DNS) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{19} +} + +func (x *DNS) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *DNS) GetIps() []string { + if x != nil { + return x.Ips + } + return nil +} + +func (x *DNS) GetTtl() uint32 { + if x != nil { + return x.Ttl + } + return 0 +} + +func (x *DNS) GetCnames() []string { + if x != nil { + return x.Cnames + } + return nil +} + +func (x *DNS) GetObservationSource() string { + if x != nil { + return x.ObservationSource + } + return "" +} + +func (x *DNS) GetRcode() uint32 { + if x != nil { + return x.Rcode + } + return 0 +} + +func (x *DNS) GetQtypes() []string { + if x != nil { + return x.Qtypes + } + return nil +} + +func (x *DNS) GetRrtypes() []string { + if x != nil { + return x.Rrtypes + } + return nil +} + +type HTTPHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *HTTPHeader) Reset() { + *x = HTTPHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPHeader) ProtoMessage() {} + +func (x *HTTPHeader) ProtoReflect() protoreflect.Message { + 
mi := &file_flow_flow_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPHeader.ProtoReflect.Descriptor instead. +func (*HTTPHeader) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{20} +} + +func (x *HTTPHeader) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *HTTPHeader) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type. +type HTTP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + Protocol string `protobuf:"bytes,4,opt,name=protocol,proto3" json:"protocol,omitempty"` + Headers []*HTTPHeader `protobuf:"bytes,5,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HTTP) Reset() { + *x = HTTP{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTP) ProtoMessage() {} + +func (x *HTTP) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use HTTP.ProtoReflect.Descriptor instead. +func (*HTTP) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{21} +} + +func (x *HTTP) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *HTTP) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *HTTP) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *HTTP) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + +func (x *HTTP) GetHeaders() []*HTTPHeader { + if x != nil { + return x.Headers + } + return nil +} + +// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type. +type Kafka struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ApiVersion int32 `protobuf:"varint,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + ApiKey string `protobuf:"bytes,3,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` + CorrelationId int32 `protobuf:"varint,4,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + Topic string `protobuf:"bytes,5,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *Kafka) Reset() { + *x = Kafka{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Kafka) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Kafka) ProtoMessage() {} + +func (x *Kafka) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Kafka.ProtoReflect.Descriptor instead. +func (*Kafka) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{22} +} + +func (x *Kafka) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *Kafka) GetApiVersion() int32 { + if x != nil { + return x.ApiVersion + } + return 0 +} + +func (x *Kafka) GetApiKey() string { + if x != nil { + return x.ApiKey + } + return "" +} + +func (x *Kafka) GetCorrelationId() int32 { + if x != nil { + return x.CorrelationId + } + return 0 +} + +func (x *Kafka) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *Service) Reset() { + *x = Service{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. 
+func (*Service) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{23} +} + +func (x *Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Service) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +// LostEvent is a message which notifies consumers about a loss of events +// that happened before the events were captured by Hubble. +type LostEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // source is the location where events got lost. + Source LostEventSource `protobuf:"varint,1,opt,name=source,proto3,enum=flow.LostEventSource" json:"source,omitempty"` + // num_events_lost is the number of events that haven been lost at source. + NumEventsLost uint64 `protobuf:"varint,2,opt,name=num_events_lost,json=numEventsLost,proto3" json:"num_events_lost,omitempty"` + // cpu on which the event was lost if the source of lost events is + // PERF_EVENT_RING_BUFFER. + Cpu *wrapperspb.Int32Value `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` +} + +func (x *LostEvent) Reset() { + *x = LostEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LostEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LostEvent) ProtoMessage() {} + +func (x *LostEvent) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LostEvent.ProtoReflect.Descriptor instead. 
+func (*LostEvent) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{24} +} + +func (x *LostEvent) GetSource() LostEventSource { + if x != nil { + return x.Source + } + return LostEventSource_UNKNOWN_LOST_EVENT_SOURCE +} + +func (x *LostEvent) GetNumEventsLost() uint64 { + if x != nil { + return x.NumEventsLost + } + return 0 +} + +func (x *LostEvent) GetCpu() *wrapperspb.Int32Value { + if x != nil { + return x.Cpu + } + return nil +} + +type AgentEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type AgentEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.AgentEventType" json:"type,omitempty"` + // Types that are assignable to Notification: + // + // *AgentEvent_Unknown + // *AgentEvent_AgentStart + // *AgentEvent_PolicyUpdate + // *AgentEvent_EndpointRegenerate + // *AgentEvent_EndpointUpdate + // *AgentEvent_IpcacheUpdate + // *AgentEvent_ServiceUpsert + // *AgentEvent_ServiceDelete + Notification isAgentEvent_Notification `protobuf_oneof:"notification"` +} + +func (x *AgentEvent) Reset() { + *x = AgentEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentEvent) ProtoMessage() {} + +func (x *AgentEvent) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentEvent.ProtoReflect.Descriptor instead. 
+func (*AgentEvent) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{25} +} + +func (x *AgentEvent) GetType() AgentEventType { + if x != nil { + return x.Type + } + return AgentEventType_AGENT_EVENT_UNKNOWN +} + +func (m *AgentEvent) GetNotification() isAgentEvent_Notification { + if m != nil { + return m.Notification + } + return nil +} + +func (x *AgentEvent) GetUnknown() *AgentEventUnknown { + if x, ok := x.GetNotification().(*AgentEvent_Unknown); ok { + return x.Unknown + } + return nil +} + +func (x *AgentEvent) GetAgentStart() *TimeNotification { + if x, ok := x.GetNotification().(*AgentEvent_AgentStart); ok { + return x.AgentStart + } + return nil +} + +func (x *AgentEvent) GetPolicyUpdate() *PolicyUpdateNotification { + if x, ok := x.GetNotification().(*AgentEvent_PolicyUpdate); ok { + return x.PolicyUpdate + } + return nil +} + +func (x *AgentEvent) GetEndpointRegenerate() *EndpointRegenNotification { + if x, ok := x.GetNotification().(*AgentEvent_EndpointRegenerate); ok { + return x.EndpointRegenerate + } + return nil +} + +func (x *AgentEvent) GetEndpointUpdate() *EndpointUpdateNotification { + if x, ok := x.GetNotification().(*AgentEvent_EndpointUpdate); ok { + return x.EndpointUpdate + } + return nil +} + +func (x *AgentEvent) GetIpcacheUpdate() *IPCacheNotification { + if x, ok := x.GetNotification().(*AgentEvent_IpcacheUpdate); ok { + return x.IpcacheUpdate + } + return nil +} + +func (x *AgentEvent) GetServiceUpsert() *ServiceUpsertNotification { + if x, ok := x.GetNotification().(*AgentEvent_ServiceUpsert); ok { + return x.ServiceUpsert + } + return nil +} + +func (x *AgentEvent) GetServiceDelete() *ServiceDeleteNotification { + if x, ok := x.GetNotification().(*AgentEvent_ServiceDelete); ok { + return x.ServiceDelete + } + return nil +} + +type isAgentEvent_Notification interface { + isAgentEvent_Notification() +} + +type AgentEvent_Unknown struct { + Unknown *AgentEventUnknown 
`protobuf:"bytes,100,opt,name=unknown,proto3,oneof"` +} + +type AgentEvent_AgentStart struct { + AgentStart *TimeNotification `protobuf:"bytes,101,opt,name=agent_start,json=agentStart,proto3,oneof"` +} + +type AgentEvent_PolicyUpdate struct { + // used for POLICY_UPDATED and POLICY_DELETED + PolicyUpdate *PolicyUpdateNotification `protobuf:"bytes,102,opt,name=policy_update,json=policyUpdate,proto3,oneof"` +} + +type AgentEvent_EndpointRegenerate struct { + // used for ENDPOINT_REGENERATE_SUCCESS and ENDPOINT_REGENERATE_FAILURE + EndpointRegenerate *EndpointRegenNotification `protobuf:"bytes,103,opt,name=endpoint_regenerate,json=endpointRegenerate,proto3,oneof"` +} + +type AgentEvent_EndpointUpdate struct { + // used for ENDPOINT_CREATED and ENDPOINT_DELETED + EndpointUpdate *EndpointUpdateNotification `protobuf:"bytes,104,opt,name=endpoint_update,json=endpointUpdate,proto3,oneof"` +} + +type AgentEvent_IpcacheUpdate struct { + // used for IPCACHE_UPSERTED and IPCACHE_DELETED + IpcacheUpdate *IPCacheNotification `protobuf:"bytes,105,opt,name=ipcache_update,json=ipcacheUpdate,proto3,oneof"` +} + +type AgentEvent_ServiceUpsert struct { + ServiceUpsert *ServiceUpsertNotification `protobuf:"bytes,106,opt,name=service_upsert,json=serviceUpsert,proto3,oneof"` +} + +type AgentEvent_ServiceDelete struct { + ServiceDelete *ServiceDeleteNotification `protobuf:"bytes,107,opt,name=service_delete,json=serviceDelete,proto3,oneof"` +} + +func (*AgentEvent_Unknown) isAgentEvent_Notification() {} + +func (*AgentEvent_AgentStart) isAgentEvent_Notification() {} + +func (*AgentEvent_PolicyUpdate) isAgentEvent_Notification() {} + +func (*AgentEvent_EndpointRegenerate) isAgentEvent_Notification() {} + +func (*AgentEvent_EndpointUpdate) isAgentEvent_Notification() {} + +func (*AgentEvent_IpcacheUpdate) isAgentEvent_Notification() {} + +func (*AgentEvent_ServiceUpsert) isAgentEvent_Notification() {} + +func (*AgentEvent_ServiceDelete) isAgentEvent_Notification() {} + +type 
AgentEventUnknown struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Notification string `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` +} + +func (x *AgentEventUnknown) Reset() { + *x = AgentEventUnknown{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentEventUnknown) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentEventUnknown) ProtoMessage() {} + +func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentEventUnknown.ProtoReflect.Descriptor instead. 
+func (*AgentEventUnknown) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{26} +} + +func (x *AgentEventUnknown) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *AgentEventUnknown) GetNotification() string { + if x != nil { + return x.Notification + } + return "" +} + +type TimeNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` +} + +func (x *TimeNotification) Reset() { + *x = TimeNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeNotification) ProtoMessage() {} + +func (x *TimeNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeNotification.ProtoReflect.Descriptor instead. 
+func (*TimeNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{27} +} + +func (x *TimeNotification) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +type PolicyUpdateNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels []string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + Revision uint64 `protobuf:"varint,2,opt,name=revision,proto3" json:"revision,omitempty"` + RuleCount int64 `protobuf:"varint,3,opt,name=rule_count,json=ruleCount,proto3" json:"rule_count,omitempty"` +} + +func (x *PolicyUpdateNotification) Reset() { + *x = PolicyUpdateNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PolicyUpdateNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PolicyUpdateNotification) ProtoMessage() {} + +func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PolicyUpdateNotification.ProtoReflect.Descriptor instead. 
+func (*PolicyUpdateNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{28} +} + +func (x *PolicyUpdateNotification) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *PolicyUpdateNotification) GetRevision() uint64 { + if x != nil { + return x.Revision + } + return 0 +} + +func (x *PolicyUpdateNotification) GetRuleCount() int64 { + if x != nil { + return x.RuleCount + } + return 0 +} + +type EndpointRegenNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *EndpointRegenNotification) Reset() { + *x = EndpointRegenNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointRegenNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointRegenNotification) ProtoMessage() {} + +func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointRegenNotification.ProtoReflect.Descriptor instead. 
+func (*EndpointRegenNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{29} +} + +func (x *EndpointRegenNotification) GetId() uint64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *EndpointRegenNotification) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *EndpointRegenNotification) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type EndpointUpdateNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + PodName string `protobuf:"bytes,4,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *EndpointUpdateNotification) Reset() { + *x = EndpointUpdateNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointUpdateNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointUpdateNotification) ProtoMessage() {} + +func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointUpdateNotification.ProtoReflect.Descriptor instead. 
+func (*EndpointUpdateNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{30} +} + +func (x *EndpointUpdateNotification) GetId() uint64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *EndpointUpdateNotification) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *EndpointUpdateNotification) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *EndpointUpdateNotification) GetPodName() string { + if x != nil { + return x.PodName + } + return "" +} + +func (x *EndpointUpdateNotification) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +type IPCacheNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cidr string `protobuf:"bytes,1,opt,name=cidr,proto3" json:"cidr,omitempty"` + Identity uint32 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"` + OldIdentity *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=old_identity,json=oldIdentity,proto3" json:"old_identity,omitempty"` + HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + OldHostIp string `protobuf:"bytes,5,opt,name=old_host_ip,json=oldHostIp,proto3" json:"old_host_ip,omitempty"` + EncryptKey uint32 `protobuf:"varint,6,opt,name=encrypt_key,json=encryptKey,proto3" json:"encrypt_key,omitempty"` + Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"` + PodName string `protobuf:"bytes,8,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` +} + +func (x *IPCacheNotification) Reset() { + *x = IPCacheNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPCacheNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + 
+func (*IPCacheNotification) ProtoMessage() {} + +func (x *IPCacheNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPCacheNotification.ProtoReflect.Descriptor instead. +func (*IPCacheNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{31} +} + +func (x *IPCacheNotification) GetCidr() string { + if x != nil { + return x.Cidr + } + return "" +} + +func (x *IPCacheNotification) GetIdentity() uint32 { + if x != nil { + return x.Identity + } + return 0 +} + +func (x *IPCacheNotification) GetOldIdentity() *wrapperspb.UInt32Value { + if x != nil { + return x.OldIdentity + } + return nil +} + +func (x *IPCacheNotification) GetHostIp() string { + if x != nil { + return x.HostIp + } + return "" +} + +func (x *IPCacheNotification) GetOldHostIp() string { + if x != nil { + return x.OldHostIp + } + return "" +} + +func (x *IPCacheNotification) GetEncryptKey() uint32 { + if x != nil { + return x.EncryptKey + } + return 0 +} + +func (x *IPCacheNotification) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *IPCacheNotification) GetPodName() string { + if x != nil { + return x.PodName + } + return "" +} + +type ServiceUpsertNotificationAddr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` +} + +func (x *ServiceUpsertNotificationAddr) Reset() { + *x = ServiceUpsertNotificationAddr{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceUpsertNotificationAddr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceUpsertNotificationAddr) ProtoMessage() {} + +func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceUpsertNotificationAddr.ProtoReflect.Descriptor instead. +func (*ServiceUpsertNotificationAddr) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{32} +} + +func (x *ServiceUpsertNotificationAddr) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *ServiceUpsertNotificationAddr) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +type ServiceUpsertNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + FrontendAddress *ServiceUpsertNotificationAddr `protobuf:"bytes,2,opt,name=frontend_address,json=frontendAddress,proto3" json:"frontend_address,omitempty"` + BackendAddresses []*ServiceUpsertNotificationAddr `protobuf:"bytes,3,rep,name=backend_addresses,json=backendAddresses,proto3" json:"backend_addresses,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + // Deprecated: Marked as deprecated in flow/flow.proto. 
+ TrafficPolicy string `protobuf:"bytes,5,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"` + ExtTrafficPolicy string `protobuf:"bytes,8,opt,name=ext_traffic_policy,json=extTrafficPolicy,proto3" json:"ext_traffic_policy,omitempty"` + IntTrafficPolicy string `protobuf:"bytes,9,opt,name=int_traffic_policy,json=intTrafficPolicy,proto3" json:"int_traffic_policy,omitempty"` +} + +func (x *ServiceUpsertNotification) Reset() { + *x = ServiceUpsertNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceUpsertNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceUpsertNotification) ProtoMessage() {} + +func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceUpsertNotification.ProtoReflect.Descriptor instead. 
+func (*ServiceUpsertNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{33} +} + +func (x *ServiceUpsertNotification) GetId() uint32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *ServiceUpsertNotification) GetFrontendAddress() *ServiceUpsertNotificationAddr { + if x != nil { + return x.FrontendAddress + } + return nil +} + +func (x *ServiceUpsertNotification) GetBackendAddresses() []*ServiceUpsertNotificationAddr { + if x != nil { + return x.BackendAddresses + } + return nil +} + +func (x *ServiceUpsertNotification) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in flow/flow.proto. +func (x *ServiceUpsertNotification) GetTrafficPolicy() string { + if x != nil { + return x.TrafficPolicy + } + return "" +} + +func (x *ServiceUpsertNotification) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceUpsertNotification) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ServiceUpsertNotification) GetExtTrafficPolicy() string { + if x != nil { + return x.ExtTrafficPolicy + } + return "" +} + +func (x *ServiceUpsertNotification) GetIntTrafficPolicy() string { + if x != nil { + return x.IntTrafficPolicy + } + return "" +} + +type ServiceDeleteNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ServiceDeleteNotification) Reset() { + *x = ServiceDeleteNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDeleteNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDeleteNotification) ProtoMessage() {} + +func (x *ServiceDeleteNotification) 
ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDeleteNotification.ProtoReflect.Descriptor instead. +func (*ServiceDeleteNotification) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{34} +} + +func (x *ServiceDeleteNotification) GetId() uint32 { + if x != nil { + return x.Id + } + return 0 +} + +type NetworkInterface struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *NetworkInterface) Reset() { + *x = NetworkInterface{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInterface) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInterface) ProtoMessage() {} + +func (x *NetworkInterface) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInterface.ProtoReflect.Descriptor instead. 
+func (*NetworkInterface) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{35} +} + +func (x *NetworkInterface) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *NetworkInterface) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type DebugEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type DebugEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.DebugEventType" json:"type,omitempty"` + Source *Endpoint `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + Hash *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + Arg1 *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=arg1,proto3" json:"arg1,omitempty"` + Arg2 *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=arg2,proto3" json:"arg2,omitempty"` + Arg3 *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=arg3,proto3" json:"arg3,omitempty"` + Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"` + Cpu *wrapperspb.Int32Value `protobuf:"bytes,8,opt,name=cpu,proto3" json:"cpu,omitempty"` +} + +func (x *DebugEvent) Reset() { + *x = DebugEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_flow_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugEvent) ProtoMessage() {} + +func (x *DebugEvent) ProtoReflect() protoreflect.Message { + mi := &file_flow_flow_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead. 
+func (*DebugEvent) Descriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{36} +} + +func (x *DebugEvent) GetType() DebugEventType { + if x != nil { + return x.Type + } + return DebugEventType_DBG_EVENT_UNKNOWN +} + +func (x *DebugEvent) GetSource() *Endpoint { + if x != nil { + return x.Source + } + return nil +} + +func (x *DebugEvent) GetHash() *wrapperspb.UInt32Value { + if x != nil { + return x.Hash + } + return nil +} + +func (x *DebugEvent) GetArg1() *wrapperspb.UInt32Value { + if x != nil { + return x.Arg1 + } + return nil +} + +func (x *DebugEvent) GetArg2() *wrapperspb.UInt32Value { + if x != nil { + return x.Arg2 + } + return nil +} + +func (x *DebugEvent) GetArg3() *wrapperspb.UInt32Value { + if x != nil { + return x.Arg3 + } + return nil +} + +func (x *DebugEvent) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *DebugEvent) GetCpu() *wrapperspb.Int32Value { + if x != nil { + return x.Cpu + } + return nil +} + +var File_flow_flow_proto protoreflect.FileDescriptor + +var file_flow_flow_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x0c, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2e, 0x0a, 0x04, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0b, 0x64, 0x72, 0x6f, + 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x0a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2b, + 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x08, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x08, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x02, 0x49, + 0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x52, 0x02, 0x6c, 0x34, 0x12, + 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, + 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x37, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, 0x12, 0x18, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, + 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x72, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x52, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3e, + 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, + 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x15, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x52, 0x0e, 0x64, 0x72, 
0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x44, 0x65, 0x73, 0x63, + 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x13, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, + 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x11, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, + 0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 
0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x58, 0x6c, 0x61, 0x74, + 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf0, 0x93, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3a, 0x0a, 0x11, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x89, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x65, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x12, 0x3c, 0x0a, 0x12, + 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x18, 0x8a, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, + 
0x10, 0x13, 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x12, 0x1d, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x54, 0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x1d, 0x0a, 0x03, + 0x55, 0x44, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x55, 0x44, 0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, 0x50, 0x12, 0x26, 0x0a, 0x06, 0x49, + 0x43, 0x4d, 0x50, 0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, + 0x50, 0x76, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, + 0x36, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x20, 0x0a, 0x04, 0x53, + 0x43, 0x54, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x53, 0x43, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, 0x43, 0x54, 0x50, 0x42, 0x0a, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, 0x1d, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x4e, + 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, + 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x23, 0x0a, 0x05, 0x6b, 0x61, + 0x66, 0x6b, 0x61, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x42, + 0x08, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x39, 0x0a, 0x0c, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x29, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xb5, + 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x19, 0x0a, + 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, + 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x0e, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x77, 0x0a, 0x03, 0x54, 0x43, + 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, + 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x18, 0x04, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x54, 0x43, 0x50, 0x46, + 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, 0x4e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x59, 0x4e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x53, 0x54, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, 0x12, 0x10, 0x0a, 0x03, 0x50, 0x53, + 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, 0x53, 0x48, 0x12, 0x10, 0x0a, 0x03, + 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x41, 0x43, 0x4b, 0x12, 0x10, + 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x55, 0x52, 0x47, + 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x45, + 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 
0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x30, 0x0a, 0x06, 0x49, + 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x30, 0x0a, + 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, + 0x6e, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 
0x6c, 0x74, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0f, 0x43, 0x69, 0x6c, 0x69, 0x75, + 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x82, 0x0a, 0x0a, 0x0a, 0x46, 0x6c, + 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, + 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x71, 0x64, 0x6e, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 
0x6f, 0x61, 0x64, 0x18, 0x1b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, + 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, + 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0d, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 
0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x08, 0x52, 0x05, 0x72, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x14, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1f, 0x0a, + 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x15, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x68, + 0x74, 0x74, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x31, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x68, + 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63, 0x70, + 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 
0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74, 0x63, + 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, + 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xce, + 0x01, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x69, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, 0x74, 
0x79, 0x70, 0x65, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x34, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, + 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, + 0x6c, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x2d, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x4c, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x03, 0x63, 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, + 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 
0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, + 0x0a, 0x0d, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, + 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 
0x6e, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, + 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, + 0x0a, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, + 0x69, 0x6d, 0x65, 0x4e, 
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, + 0x6d, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, + 0x0a, 0x19, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, + 0x99, 0x02, 0x0a, 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, + 0x5f, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, + 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, + 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x4b, + 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0x9a, 0x03, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, + 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, + 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, + 0x0a, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 
0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, + 0x78, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, + 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 
0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, + 0x72, 0x67, 0x31, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, + 0x04, 0x61, 0x72, 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, + 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, + 0x33, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, + 0x70, 0x75, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, + 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, + 0x2a, 0xea, 0x01, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, + 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, + 0x54, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, + 0x52, 0x4c, 0x41, 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, + 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, + 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, + 0x4d, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 
0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, + 0x4d, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, + 0x52, 0x4f, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x4f, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0x48, 0x0a, + 0x0a, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, + 0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, + 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12, + 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x64, 0x69, 0x63, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, + 0x57, 0x41, 0x52, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50, + 0x50, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52, + 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54, + 0x52, 0x41, 0x43, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53, + 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x2a, 0xfb, 0x10, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 
0x5f, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, + 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x4d, 0x41, 0x43, 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, + 0x0a, 0x0d, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, + 0x85, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, + 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, + 0x23, 0x0a, 0x1e, 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, + 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x87, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x88, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 0x01, 0x12, + 0x27, 0x0a, 0x22, 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, + 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x43, 0x4f, 0x4c, 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, + 0x5f, 0x54, 0x41, 0x49, 
0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, + 0x17, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, + 0x4f, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, + 0x4f, 0x4c, 0x10, 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, + 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, + 0x34, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, + 0x10, 0x91, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, + 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, + 0x1b, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, + 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, + 0x24, 0x0a, 0x1f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, + 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, + 0x4e, 0x53, 0x10, 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95, + 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x33, 0x5f, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x96, + 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x5f, 0x55, 0x4e, + 0x52, 0x4f, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 
0x50, 0x10, 0x97, 0x01, 0x12, 0x26, + 0x0a, 0x21, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f, + 0x43, 0x41, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, + 0x5f, 0x4c, 0x33, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, + 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, + 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, + 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, + 0x41, 0x50, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, + 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, + 0x1e, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, + 0x4e, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, + 0x28, 0x0a, 0x23, 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, + 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, + 
0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, + 0x0a, 0x26, 0x52, 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, + 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, + 0x5f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x10, 0xa3, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, + 0x54, 0x5f, 0x49, 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, + 0x10, 0xa4, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, + 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, + 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, + 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, + 0x32, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xa6, 0x01, 0x12, 0x22, 0x0a, + 0x1d, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, + 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, + 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, + 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, + 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, + 0x16, 0x0a, 0x11, 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x10, 0xa9, 
0x01, 0x12, 0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, + 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, + 0x5f, 0x49, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, + 0x01, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, + 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, + 0x0e, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, + 0xad, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, + 0x45, 0x52, 0x49, 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, + 0x5f, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, + 0x4d, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, + 0x44, 0x44, 0x45, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, + 0x41, 0x47, 0x45, 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, + 0x5f, 0x42, 0x59, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, + 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0xb2, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, + 0x53, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, + 0x31, 0x0a, 0x2c, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 
0x50, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, + 0xb4, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, + 0x59, 0x10, 0xb5, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, + 0x54, 0x45, 0x52, 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, + 0xb8, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, + 0x17, 0x0a, 0x12, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x34, + 0x36, 0x10, 0xbb, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, + 0x12, 0x12, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0xbd, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, + 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, + 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, + 0xbf, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, + 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x55, + 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, + 0x50, 0x10, 0xc1, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, 0x45, 0x47, 0x52, 0x45, 0x53, + 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, 0x01, 0x12, 0x18, 0x0a, 0x13, + 0x55, 0x4e, 
0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x46, + 0x46, 0x49, 0x43, 0x10, 0xc3, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x54, 0x54, 0x4c, 0x5f, 0x45, 0x58, + 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xc4, 0x01, 0x12, 0x0f, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, + 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x44, 0x10, 0xc5, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x44, 0x52, + 0x4f, 0x50, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0xc6, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x49, 0x47, 0x4d, 0x50, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, + 0x45, 0x44, 0x10, 0xc7, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x49, 0x47, 0x4d, 0x50, 0x5f, 0x53, 0x55, + 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x44, 0x10, 0xc8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4d, + 0x55, 0x4c, 0x54, 0x49, 0x43, 0x41, 0x53, 0x54, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x44, + 0x10, 0xc9, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, 0x53, 0x54, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0xca, 0x01, 0x12, 0x16, 0x0a, + 0x11, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x45, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x59, 0x10, 0xcb, 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x41, + 0x46, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, + 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, + 0x02, 0x2a, 0x8d, 0x02, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, + 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x43, + 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x18, 
0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, + 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x04, + 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, + 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, + 0x34, 0x36, 0x10, 0x06, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, + 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, 0x07, 0x12, + 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, + 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, + 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, + 0x50, 0x4f, 0x53, 0x54, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, + 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x0a, + 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x01, 0x10, + 0x03, 0x2a, 0x39, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x0f, + 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, 0x54, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 
0x12, 0x1a, + 0x0a, 0x16, 0x50, 0x45, 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x49, 0x4e, + 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, + 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, 0x51, 0x55, + 0x45, 0x55, 0x45, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, 0x45, 0x5f, + 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, 0xae, 0x02, + 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, + 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, + 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, + 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, + 0x45, 0x44, 0x10, 0x04, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, + 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, + 0x45, 0x53, 0x53, 0x10, 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, + 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, + 0x4e, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, + 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, + 0x10, 0x08, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x55, 0x50, + 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, 0x43, 0x41, + 0x43, 0x48, 0x45, 0x5f, 
0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, + 0x10, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, + 0x44, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x44, + 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0xd8, + 0x01, 0x0a, 0x16, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x4f, 0x43, + 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, + 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, + 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x01, 0x12, + 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, + 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, + 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, + 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x03, + 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, + 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0x81, 0x0d, 0x0a, 0x0e, 0x44, 0x65, + 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, + 0x44, 0x42, 0x47, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 
0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x49, 0x43, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, + 0x4c, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, + 0x44, 0x42, 0x47, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x15, + 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, + 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, + 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x07, 0x12, + 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, + 0x08, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, + 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, + 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0x0b, + 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x49, + 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, + 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, + 0x45, 0x44, 0x45, 0x44, 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, + 0x5f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, + 0x47, 0x5f, 0x44, 0x45, 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, + 
0x5f, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x44, + 0x42, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, 0x12, 0x0f, + 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x13, 0x12, + 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, + 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, 0x41, 0x53, + 0x48, 0x10, 0x15, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, + 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x16, + 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, + 0x10, 0x17, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, + 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, + 0x54, 0x10, 0x18, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, + 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, + 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, 0x0a, 0x23, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, + 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x10, 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, + 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, + 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, + 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, + 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x1c, 
0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, + 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x1d, 0x12, + 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, + 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, + 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, 0x12, 0x1f, + 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x20, 0x12, + 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, + 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, + 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, + 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, + 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, + 0x22, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, + 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, + 0x10, 0x23, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, + 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x10, 0x24, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, + 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x15, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, + 0x53, 0x4e, 0x41, 0x54, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 
0x47, 0x5f, 0x4c, 0x42, + 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, + 0x52, 0x45, 0x56, 0x10, 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, + 0x5f, 0x52, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, + 0x5f, 0x53, 0x45, 0x4c, 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, + 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x2a, + 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, + 0x59, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, + 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, + 0x45, 0x10, 0x2c, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x4f, + 0x4c, 0x49, 0x43, 0x59, 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, + 0x54, 0x44, 0x45, 0x56, 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, + 0x2e, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, + 0x45, 0x4e, 0x43, 0x41, 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, + 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, 0x12, 0x14, + 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, + 0x5f, 0x32, 0x10, 0x31, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, + 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, 0x33, 0x12, + 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x36, 0x5f, 
0x32, 0x10, 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, + 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, 0x12, 0x11, + 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, + 0x37, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, + 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, 0x0a, 0x15, + 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, + 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, + 0x34, 0x10, 0x3a, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, + 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, 0x3b, 0x12, + 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, + 0x43, 0x54, 0x10, 0x3c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, 0x48, 0x45, + 0x52, 0x49, 0x54, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, 0x12, 0x12, + 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, + 0x10, 0x3e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, + 0x4b, 0x55, 0x50, 0x36, 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, + 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x40, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, + 0x5f, 0x4c, 0x37, 0x5f, 0x4c, 0x42, 0x10, 0x41, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, + 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x42, 0x42, 0x26, 0x5a, + 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 
0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, + 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flow_flow_proto_rawDescOnce sync.Once + file_flow_flow_proto_rawDescData = file_flow_flow_proto_rawDesc +) + +func file_flow_flow_proto_rawDescGZIP() []byte { + file_flow_flow_proto_rawDescOnce.Do(func() { + file_flow_flow_proto_rawDescData = protoimpl.X.CompressGZIP(file_flow_flow_proto_rawDescData) + }) + return file_flow_flow_proto_rawDescData +} + +var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 14) +var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 37) +var file_flow_flow_proto_goTypes = []interface{}{ + (FlowType)(0), // 0: flow.FlowType + (AuthType)(0), // 1: flow.AuthType + (TraceObservationPoint)(0), // 2: flow.TraceObservationPoint + (L7FlowType)(0), // 3: flow.L7FlowType + (IPVersion)(0), // 4: flow.IPVersion + (Verdict)(0), // 5: flow.Verdict + (DropReason)(0), // 6: flow.DropReason + (TrafficDirection)(0), // 7: flow.TrafficDirection + (DebugCapturePoint)(0), // 8: flow.DebugCapturePoint + (EventType)(0), // 9: flow.EventType + (LostEventSource)(0), // 10: flow.LostEventSource + (AgentEventType)(0), // 11: flow.AgentEventType + (SocketTranslationPoint)(0), // 12: flow.SocketTranslationPoint + (DebugEventType)(0), // 13: flow.DebugEventType + (*Flow)(nil), // 14: flow.Flow + (*Layer4)(nil), // 15: flow.Layer4 + (*Layer7)(nil), // 16: flow.Layer7 + (*TraceContext)(nil), // 17: flow.TraceContext + (*TraceParent)(nil), // 18: flow.TraceParent + (*Endpoint)(nil), // 19: flow.Endpoint + (*Workload)(nil), // 20: flow.Workload + (*TCP)(nil), // 21: flow.TCP + (*IP)(nil), // 22: flow.IP + (*Ethernet)(nil), // 23: flow.Ethernet + (*TCPFlags)(nil), // 24: flow.TCPFlags + (*UDP)(nil), // 25: flow.UDP + (*SCTP)(nil), // 26: flow.SCTP + (*ICMPv4)(nil), // 27: flow.ICMPv4 + (*ICMPv6)(nil), // 
28: flow.ICMPv6 + (*Policy)(nil), // 29: flow.Policy + (*EventTypeFilter)(nil), // 30: flow.EventTypeFilter + (*CiliumEventType)(nil), // 31: flow.CiliumEventType + (*FlowFilter)(nil), // 32: flow.FlowFilter + (*DNS)(nil), // 33: flow.DNS + (*HTTPHeader)(nil), // 34: flow.HTTPHeader + (*HTTP)(nil), // 35: flow.HTTP + (*Kafka)(nil), // 36: flow.Kafka + (*Service)(nil), // 37: flow.Service + (*LostEvent)(nil), // 38: flow.LostEvent + (*AgentEvent)(nil), // 39: flow.AgentEvent + (*AgentEventUnknown)(nil), // 40: flow.AgentEventUnknown + (*TimeNotification)(nil), // 41: flow.TimeNotification + (*PolicyUpdateNotification)(nil), // 42: flow.PolicyUpdateNotification + (*EndpointRegenNotification)(nil), // 43: flow.EndpointRegenNotification + (*EndpointUpdateNotification)(nil), // 44: flow.EndpointUpdateNotification + (*IPCacheNotification)(nil), // 45: flow.IPCacheNotification + (*ServiceUpsertNotificationAddr)(nil), // 46: flow.ServiceUpsertNotificationAddr + (*ServiceUpsertNotification)(nil), // 47: flow.ServiceUpsertNotification + (*ServiceDeleteNotification)(nil), // 48: flow.ServiceDeleteNotification + (*NetworkInterface)(nil), // 49: flow.NetworkInterface + (*DebugEvent)(nil), // 50: flow.DebugEvent + (*timestamppb.Timestamp)(nil), // 51: google.protobuf.Timestamp + (*wrapperspb.BoolValue)(nil), // 52: google.protobuf.BoolValue + (*anypb.Any)(nil), // 53: google.protobuf.Any + (*wrapperspb.Int32Value)(nil), // 54: google.protobuf.Int32Value + (*wrapperspb.UInt32Value)(nil), // 55: google.protobuf.UInt32Value +} +var file_flow_flow_proto_depIdxs = []int32{ + 51, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp + 5, // 1: flow.Flow.verdict:type_name -> flow.Verdict + 1, // 2: flow.Flow.auth_type:type_name -> flow.AuthType + 23, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet + 22, // 4: flow.Flow.IP:type_name -> flow.IP + 15, // 5: flow.Flow.l4:type_name -> flow.Layer4 + 19, // 6: flow.Flow.source:type_name -> flow.Endpoint + 19, // 7: 
flow.Flow.destination:type_name -> flow.Endpoint + 0, // 8: flow.Flow.Type:type_name -> flow.FlowType + 16, // 9: flow.Flow.l7:type_name -> flow.Layer7 + 31, // 10: flow.Flow.event_type:type_name -> flow.CiliumEventType + 37, // 11: flow.Flow.source_service:type_name -> flow.Service + 37, // 12: flow.Flow.destination_service:type_name -> flow.Service + 7, // 13: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection + 2, // 14: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint + 6, // 15: flow.Flow.drop_reason_desc:type_name -> flow.DropReason + 52, // 16: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue + 8, // 17: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint + 49, // 18: flow.Flow.interface:type_name -> flow.NetworkInterface + 17, // 19: flow.Flow.trace_context:type_name -> flow.TraceContext + 12, // 20: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint + 53, // 21: flow.Flow.extensions:type_name -> google.protobuf.Any + 29, // 22: flow.Flow.egress_allowed_by:type_name -> flow.Policy + 29, // 23: flow.Flow.ingress_allowed_by:type_name -> flow.Policy + 21, // 24: flow.Layer4.TCP:type_name -> flow.TCP + 25, // 25: flow.Layer4.UDP:type_name -> flow.UDP + 27, // 26: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4 + 28, // 27: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6 + 26, // 28: flow.Layer4.SCTP:type_name -> flow.SCTP + 3, // 29: flow.Layer7.type:type_name -> flow.L7FlowType + 33, // 30: flow.Layer7.dns:type_name -> flow.DNS + 35, // 31: flow.Layer7.http:type_name -> flow.HTTP + 36, // 32: flow.Layer7.kafka:type_name -> flow.Kafka + 18, // 33: flow.TraceContext.parent:type_name -> flow.TraceParent + 20, // 34: flow.Endpoint.workloads:type_name -> flow.Workload + 24, // 35: flow.TCP.flags:type_name -> flow.TCPFlags + 4, // 36: flow.IP.ipVersion:type_name -> flow.IPVersion + 20, // 37: flow.FlowFilter.source_workload:type_name -> flow.Workload + 20, // 38: 
flow.FlowFilter.destination_workload:type_name -> flow.Workload + 7, // 39: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection + 5, // 40: flow.FlowFilter.verdict:type_name -> flow.Verdict + 30, // 41: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter + 34, // 42: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader + 24, // 43: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags + 4, // 44: flow.FlowFilter.ip_version:type_name -> flow.IPVersion + 34, // 45: flow.HTTP.headers:type_name -> flow.HTTPHeader + 10, // 46: flow.LostEvent.source:type_name -> flow.LostEventSource + 54, // 47: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value + 11, // 48: flow.AgentEvent.type:type_name -> flow.AgentEventType + 40, // 49: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown + 41, // 50: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification + 42, // 51: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification + 43, // 52: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification + 44, // 53: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification + 45, // 54: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification + 47, // 55: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification + 48, // 56: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification + 51, // 57: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp + 55, // 58: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value + 46, // 59: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr + 46, // 60: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr + 13, // 61: flow.DebugEvent.type:type_name -> flow.DebugEventType + 19, // 62: flow.DebugEvent.source:type_name -> flow.Endpoint + 55, // 63: flow.DebugEvent.hash:type_name -> 
google.protobuf.UInt32Value + 55, // 64: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value + 55, // 65: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value + 55, // 66: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value + 54, // 67: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value + 68, // [68:68] is the sub-list for method output_type + 68, // [68:68] is the sub-list for method input_type + 68, // [68:68] is the sub-list for extension type_name + 68, // [68:68] is the sub-list for extension extendee + 0, // [0:68] is the sub-list for field type_name +} + +func init() { file_flow_flow_proto_init() } +func file_flow_flow_proto_init() { + if File_flow_flow_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flow_flow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Flow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Layer4); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Layer7); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceParent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_flow_flow_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCP); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IP); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Ethernet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCPFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UDP); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SCTP); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_flow_flow_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ICMPv4); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ICMPv6); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventTypeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CiliumEventType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FlowFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DNS); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_flow_flow_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTP); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Kafka); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LostEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentEventUnknown); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimeNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyUpdateNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_flow_flow_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointRegenNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointUpdateNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPCacheNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceUpsertNotificationAddr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceUpsertNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDeleteNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInterface); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flow_flow_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugEvent); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_flow_flow_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Layer4_TCP)(nil), + (*Layer4_UDP)(nil), + (*Layer4_ICMPv4)(nil), + (*Layer4_ICMPv6)(nil), + (*Layer4_SCTP)(nil), + } + file_flow_flow_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Layer7_Dns)(nil), + (*Layer7_Http)(nil), + (*Layer7_Kafka)(nil), + } + file_flow_flow_proto_msgTypes[25].OneofWrappers = []interface{}{ + (*AgentEvent_Unknown)(nil), + (*AgentEvent_AgentStart)(nil), + (*AgentEvent_PolicyUpdate)(nil), + (*AgentEvent_EndpointRegenerate)(nil), + (*AgentEvent_EndpointUpdate)(nil), + (*AgentEvent_IpcacheUpdate)(nil), + (*AgentEvent_ServiceUpsert)(nil), + (*AgentEvent_ServiceDelete)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flow_flow_proto_rawDesc, + NumEnums: 14, + NumMessages: 37, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flow_flow_proto_goTypes, + DependencyIndexes: file_flow_flow_proto_depIdxs, + EnumInfos: file_flow_flow_proto_enumTypes, + MessageInfos: file_flow_flow_proto_msgTypes, + }.Build() + File_flow_flow_proto = out.File + file_flow_flow_proto_rawDesc = nil + file_flow_flow_proto_goTypes = nil + file_flow_flow_proto_depIdxs = nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go new file mode 100644 index 0000000000..09a8e88acb --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go @@ -0,0 +1,600 @@ +// Code generated by protoc-gen-go-json. DO NOT EDIT. 
+// source: flow/flow.proto + +package flow + +import ( + "google.golang.org/protobuf/encoding/protojson" +) + +// MarshalJSON implements json.Marshaler +func (msg *Flow) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Flow) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Layer4) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Layer4) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Layer7) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Layer7) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *TraceContext) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *TraceContext) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *TraceParent) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + 
+// UnmarshalJSON implements json.Unmarshaler +func (msg *TraceParent) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Endpoint) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Endpoint) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Workload) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Workload) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *TCP) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *TCP) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *IP) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *IP) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Ethernet) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + 
EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Ethernet) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *TCPFlags) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *TCPFlags) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *UDP) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *UDP) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *SCTP) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *SCTP) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *ICMPv4) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *ICMPv4) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *ICMPv6) MarshalJSON() ([]byte, error) { + 
return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *ICMPv6) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Policy) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Policy) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *EventTypeFilter) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *EventTypeFilter) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *CiliumEventType) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *CiliumEventType) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *FlowFilter) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *FlowFilter) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, 
msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *DNS) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *DNS) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *HTTPHeader) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *HTTPHeader) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *HTTP) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *HTTP) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Kafka) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Kafka) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Service) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Service) UnmarshalJSON(b []byte) error { + return 
protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *LostEvent) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *LostEvent) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *AgentEvent) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *AgentEvent) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *AgentEventUnknown) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *AgentEventUnknown) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *TimeNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *TimeNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *PolicyUpdateNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: 
true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *PolicyUpdateNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *EndpointRegenNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *EndpointRegenNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *EndpointUpdateNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *EndpointUpdateNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *IPCacheNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *IPCacheNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *ServiceUpsertNotificationAddr) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *ServiceUpsertNotificationAddr) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: 
false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *ServiceUpsertNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *ServiceUpsertNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *ServiceDeleteNotification) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *ServiceDeleteNotification) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *NetworkInterface) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *NetworkInterface) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *DebugEvent) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *DebugEvent) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto new file mode 100644 index 0000000000..aa04ad9f3c --- /dev/null +++ 
b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; +import "google/protobuf/timestamp.proto"; + +package flow; + +option go_package = "github.com/cilium/cilium/api/v1/flow"; + +message Flow { + google.protobuf.Timestamp time = 1; + + // uuid is a universally unique identifier for this flow. + string uuid = 34; + + Verdict verdict = 2; + // only applicable to Verdict = DROPPED. + // deprecated in favor of drop_reason_desc. + uint32 drop_reason = 3 [deprecated=true]; + + // auth_type is the authentication type specified for the flow in Cilium Network Policy. + // Only set on policy verdict events. + AuthType auth_type = 35; + + // l2 + Ethernet ethernet = 4; + // l3 + IP IP = 5; + // l4 + Layer4 l4 = 6; + + reserved 7; // removed, do not use + + Endpoint source = 8; + Endpoint destination = 9; + + FlowType Type = 10; + + // NodeName is the name of the node from which this Flow was captured. + string node_name = 11; + + reserved 12; // removed, do not use + + // all names the source IP can have. + repeated string source_names = 13; + // all names the destination IP can have. + repeated string destination_names = 14; + + // L7 information. This field is set if and only if FlowType is L7. + Layer7 l7 = 15; + + // Deprecated. This suffers from false negatives due to protobuf not being + // able to distinguish between the value being false or it being absent. + // Please use is_reply instead. 
+ bool reply = 16 [deprecated=true]; + + reserved 17, 18; // removed, do not use + + // EventType of the originating Cilium event + CiliumEventType event_type = 19; + + // source_service contains the service name of the source + Service source_service = 20; + // destination_service contains the service name of the destination + Service destination_service = 21; + + // traffic_direction of the connection, e.g. ingress or egress + TrafficDirection traffic_direction = 22; + + // policy_match_type is only applicable to the cilium event type PolicyVerdict + // https://github.com/cilium/cilium/blob/e831859b5cc336c6d964a6d35bbd34d1840e21b9/pkg/monitor/datapath_policy.go#L50 + uint32 policy_match_type = 23; + + // Only applicable to cilium trace notifications, blank for other types. + TraceObservationPoint trace_observation_point = 24; + + // only applicable to Verdict = DROPPED. + DropReason drop_reason_desc = 25; + + // is_reply indicates that this was a packet (L4) or message (L7) in the + // reply direction. May be absent (in which case it is unknown whether it + // is a reply or not). + google.protobuf.BoolValue is_reply = 26; + + // Only applicable to cilium debug capture events, blank for other types + DebugCapturePoint debug_capture_point = 27; + + // interface is the network interface on which this flow was observed + NetworkInterface interface = 28; + + // proxy_port indicates the port of the proxy to which the flow was forwarded + uint32 proxy_port = 29; + + // trace_context contains information about a trace related to the flow, if + // any. + TraceContext trace_context = 30; + + // sock_xlate_point is the socket translation point. + // Only applicable to TraceSock notifications, blank for other types + SocketTranslationPoint sock_xlate_point = 31; + + // socket_cookie is the Linux kernel socket cookie for this flow. 
+ // Only applicable to TraceSock notifications, zero for other types + uint64 socket_cookie = 32; + + // cgroup_id of the process which emitted this event. + // Only applicable to TraceSock notifications, zero for other types + uint64 cgroup_id = 33; + + // This is a temporary workaround to support summary field for pb.Flow without + // duplicating logic from the old parser. This field will be removed once we + // fully migrate to the new parser. + string Summary = 100000 [deprecated=true]; + + // extensions can be used to add arbitrary additional metadata to flows. + // This can be used to extend functionality for other Hubble compatible + // APIs, or experiment with new functionality without needing to change the public API. + google.protobuf.Any extensions = 150000; + + // The CiliumNetworkPolicies allowing the egress of the flow. + repeated Policy egress_allowed_by = 21001; + // The CiliumNetworkPolicies allowing the ingress of the flow. + repeated Policy ingress_allowed_by = 21002; +} + +enum FlowType { + UNKNOWN_TYPE = 0; + L3_L4 = 1; // not sure about the underscore here, but `L34` also reads strange + L7 = 2; + SOCK = 3; +} + +// These types correspond to definitions in pkg/policy/l4.go. +enum AuthType { + DISABLED = 0; + SPIRE = 1; + TEST_ALWAYS_FAIL = 2; +} + +enum TraceObservationPoint { + // Cilium treats 0 as TO_LXC, but its's something we should work to remove. + // This is intentionally set as unknown, so proto API can guarantee the + // observation point is always going to be present on trace events. + UNKNOWN_POINT = 0; + + // TO_PROXY indicates network packets are transmitted towards the l7 proxy. + TO_PROXY = 1; + // TO_HOST indicates network packets are transmitted towards the host + // namespace. + TO_HOST = 2; + // TO_STACK indicates network packets are transmitted towards the Linux + // kernel network stack on host machine. + TO_STACK = 3; + // TO_OVERLAY indicates network packets are transmitted towards the tunnel + // device. 
+ TO_OVERLAY = 4; + // TO_ENDPOINT indicates network packets are transmitted towards endpoints + // (containers). + TO_ENDPOINT = 101; + // FROM_ENDPOINT indicates network packets were received from endpoints + // (containers). + FROM_ENDPOINT = 5; + // FROM_PROXY indicates network packets were received from the l7 proxy. + FROM_PROXY = 6; + // FROM_HOST indicates network packets were received from the host + // namespace. + FROM_HOST = 7; + // FROM_STACK indicates network packets were received from the Linux kernel + // network stack on host machine. + FROM_STACK = 8; + // FROM_OVERLAY indicates network packets were received from the tunnel + // device. + FROM_OVERLAY = 9; + // FROM_NETWORK indicates network packets were received from native + // devices. + FROM_NETWORK = 10; + // TO_NETWORK indicates network packets are transmitted towards native + // devices. + TO_NETWORK = 11; +} + +message Layer4 { + oneof protocol { + TCP TCP = 1; + UDP UDP = 2; + // ICMP is technically not L4, but mutually exclusive with the above + ICMPv4 ICMPv4 = 3; + ICMPv6 ICMPv6 = 4; + SCTP SCTP = 5; + } +} + +// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26): +enum L7FlowType { + UNKNOWN_L7_TYPE = 0; + REQUEST = 1; + RESPONSE = 2; + SAMPLE = 3; +} + +// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141): +message Layer7 { + L7FlowType type = 1; + // Latency of the response + uint64 latency_ns = 2; + // L7 field. This field is set if and only if FlowType is L7. + oneof record { + DNS dns = 100; + HTTP http = 101; + Kafka kafka = 102; + } +} + +// TraceContext contains trace context propagation data, i.e. information about a +// distributed trace. 
+// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/). +message TraceContext { + // parent identifies the incoming request in a tracing system. + TraceParent parent = 1; +} + +// TraceParent identifies the incoming request in a tracing system. +message TraceParent { + // trace_id is a unique value that identifies a trace. It is a byte array + // represented as a hex string. + string trace_id = 1; +} + +message Endpoint { + uint32 ID = 1; + uint32 identity = 2; + string namespace = 3; + // labels in `foo=bar` format. + repeated string labels = 4; + string pod_name = 5; + repeated Workload workloads = 6; +} + +message Workload { + string name = 1; + string kind = 2; +} + +message TCP { + uint32 source_port = 1; + uint32 destination_port = 2; + TCPFlags flags = 3; +} + +message IP { + string source = 1; + string destination = 2; + IPVersion ipVersion = 3; + // This field indicates whether the TraceReasonEncryptMask is set or not. + // https://github.com/cilium/cilium/blob/ba0ed147bd5bb342f67b1794c2ad13c6e99d5236/pkg/monitor/datapath_trace.go#L27 + bool encrypted = 4; +} + +message Ethernet { + string source = 1; + string destination = 2; +} + +message TCPFlags { + bool FIN = 1; + bool SYN = 2; + bool RST = 3; + bool PSH = 4; + bool ACK = 5; + bool URG = 6; + bool ECE = 7; + bool CWR = 8; + bool NS = 9; +} + +message UDP { + uint32 source_port = 1; + uint32 destination_port = 2; +} + +message SCTP { + uint32 source_port = 1; + uint32 destination_port = 2; +} + +message ICMPv4 { + uint32 type = 1; + uint32 code = 2; +} + +message ICMPv6 { + uint32 type = 1; + uint32 code = 2; +} + +enum IPVersion { + IP_NOT_USED = 0; + IPv4 = 1; + IPv6 = 2; +} + +enum Verdict { + // UNKNOWN is used if there is no verdict for this flow event + VERDICT_UNKNOWN = 0; + // FORWARDED is used for flow events where the trace point has forwarded + // this packet or connection to the next processing entity. 
+ FORWARDED = 1; + // DROPPED is used for flow events where the connection or packet has + // been dropped (e.g. due to a malformed packet, it being rejected by a + // network policy etc). The exact drop reason may be found in drop_reason_desc. + DROPPED = 2; + // ERROR is used for flow events where an error occurred during processing + ERROR = 3; + // AUDIT is used on policy verdict events in policy audit mode, to + // denominate flows that would have been dropped by policy if audit mode + // was turned off + AUDIT = 4; + // REDIRECTED is used for flow events which have been redirected to the proxy + REDIRECTED = 5; + // TRACED is used for flow events which have been observed at a trace point, + // but no particular verdict has been reached yet + TRACED = 6; + // TRANSLATED is used for flow events where an address has been translated + TRANSLATED = 7; +} + +// These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h. +// Note that non-drop reasons (i.e. values less than api.DropMin) are not used +// here. 
+enum DropReason { + // non-drop reasons + DROP_REASON_UNKNOWN = 0; + // drop reasons + INVALID_SOURCE_MAC = 130; + INVALID_DESTINATION_MAC = 131; + INVALID_SOURCE_IP = 132; + POLICY_DENIED = 133; + INVALID_PACKET_DROPPED = 134; + CT_TRUNCATED_OR_INVALID_HEADER = 135; + CT_MISSING_TCP_ACK_FLAG = 136; + CT_UNKNOWN_L4_PROTOCOL = 137; + CT_CANNOT_CREATE_ENTRY_FROM_PACKET = 138; + UNSUPPORTED_L3_PROTOCOL = 139; + MISSED_TAIL_CALL = 140; + ERROR_WRITING_TO_PACKET = 141; + UNKNOWN_L4_PROTOCOL = 142; + UNKNOWN_ICMPV4_CODE = 143; + UNKNOWN_ICMPV4_TYPE = 144; + UNKNOWN_ICMPV6_CODE = 145; + UNKNOWN_ICMPV6_TYPE = 146; + ERROR_RETRIEVING_TUNNEL_KEY = 147; + ERROR_RETRIEVING_TUNNEL_OPTIONS = 148; + INVALID_GENEVE_OPTION = 149; + UNKNOWN_L3_TARGET_ADDRESS = 150; + STALE_OR_UNROUTABLE_IP = 151; + NO_MATCHING_LOCAL_CONTAINER_FOUND = 152; + ERROR_WHILE_CORRECTING_L3_CHECKSUM = 153; + ERROR_WHILE_CORRECTING_L4_CHECKSUM = 154; + CT_MAP_INSERTION_FAILED = 155; + INVALID_IPV6_EXTENSION_HEADER = 156; + IP_FRAGMENTATION_NOT_SUPPORTED = 157; + SERVICE_BACKEND_NOT_FOUND = 158; + NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT = 160; + FAILED_TO_INSERT_INTO_PROXYMAP = 161; + REACHED_EDT_RATE_LIMITING_DROP_HORIZON = 162; + UNKNOWN_CONNECTION_TRACKING_STATE = 163; + LOCAL_HOST_IS_UNREACHABLE = 164; + NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION = 165; + UNSUPPORTED_L2_PROTOCOL = 166; + NO_MAPPING_FOR_NAT_MASQUERADE = 167; + UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE = 168; + FIB_LOOKUP_FAILED = 169; + ENCAPSULATION_TRAFFIC_IS_PROHIBITED = 170; + INVALID_IDENTITY = 171; + UNKNOWN_SENDER = 172; + NAT_NOT_NEEDED = 173; + IS_A_CLUSTERIP = 174; + FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND = 175; + FORBIDDEN_ICMPV6_MESSAGE = 176; + DENIED_BY_LB_SRC_RANGE_CHECK = 177; + SOCKET_LOOKUP_FAILED = 178; + SOCKET_ASSIGN_FAILED = 179; + PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL = 180; + POLICY_DENY = 181; + VLAN_FILTERED = 182; + INVALID_VNI = 183; + INVALID_TC_BUFFER = 184; + NO_SID = 185; + MISSING_SRV6_STATE 
= 186; + NAT46 = 187; + NAT64 = 188; + AUTH_REQUIRED = 189; + CT_NO_MAP_FOUND = 190; + SNAT_NO_MAP_FOUND = 191; + INVALID_CLUSTER_ID = 192; + UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = 193; + NO_EGRESS_GATEWAY = 194; + UNENCRYPTED_TRAFFIC = 195; + TTL_EXCEEDED = 196; + NO_NODE_ID = 197; + DROP_RATE_LIMITED = 198; + IGMP_HANDLED = 199; + IGMP_SUBSCRIBED = 200; + MULTICAST_HANDLED = 201; + // A BPF program wants to tail call into bpf_host, but the host datapath + // hasn't been loaded yet. + DROP_HOST_NOT_READY = 202; + // A BPF program wants to tail call some endpoint's policy program in the + // POLICY_CALL_MAP, but the program is not available. + DROP_EP_NOT_READY = 203; +} + +enum TrafficDirection { + TRAFFIC_DIRECTION_UNKNOWN = 0; + INGRESS = 1; + EGRESS = 2; +} + +// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. +enum DebugCapturePoint { + DBG_CAPTURE_POINT_UNKNOWN = 0; + reserved 1 to 3; + DBG_CAPTURE_DELIVERY = 4; + DBG_CAPTURE_FROM_LB = 5; + DBG_CAPTURE_AFTER_V46 = 6; + DBG_CAPTURE_AFTER_V64 = 7; + DBG_CAPTURE_PROXY_PRE = 8; + DBG_CAPTURE_PROXY_POST = 9; + DBG_CAPTURE_SNAT_PRE = 10; + DBG_CAPTURE_SNAT_POST = 11; +} + +message Policy { + string name = 1; + string namespace = 2; + repeated string labels = 3; + uint64 revision = 4; +} + +// EventTypeFilter is a filter describing a particular event type. +message EventTypeFilter { + // type is the primary flow type as defined by: + // github.com/cilium/cilium/pkg/monitor/api.MessageType* + int32 type = 1; + + // match_sub_type is set to true when matching on the sub_type should + // be done. This flag is required as 0 is a valid sub_type. + bool match_sub_type = 2; + + // sub_type is the secondary type, e.g. + // - github.com/cilium/cilium/pkg/monitor/api.Trace* + int32 sub_type = 3; +} + +// CiliumEventType from which the flow originated. +message CiliumEventType { + // type of event the flow originated from, i.e. 
+ // github.com/cilium/cilium/pkg/monitor/api.MessageType* + int32 type = 1; + // sub_type may indicate more details depending on type, e.g. + // - github.com/cilium/cilium/pkg/monitor/api.Trace* + // - github.com/cilium/cilium/pkg/monitor/api.Drop* + // - github.com/cilium/cilium/pkg/monitor/api.DbgCapture* + int32 sub_type = 2; +} + +// FlowFilter represent an individual flow filter. All fields are optional. If +// multiple fields are set, then all fields must match for the filter to match. +message FlowFilter { + // uuid filters by a list of flow uuids. + repeated string uuid = 29; + // source_ip filters by a list of source ips. Each of the source ips can be + // specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g. + // "1.1.1.0/24"). + repeated string source_ip = 1; + // source_pod filters by a list of source pod name prefixes, optionally + // within a given namespace (e.g. "xwing", "kube-system/coredns-"). + // The pod name can be omitted to only filter by namespace + // (e.g. "kube-system/") or the namespace can be omitted to filter for + // pods in any namespace (e.g. "/xwing") + repeated string source_pod = 2; + // source_fqdn filters by a list of source fully qualified domain names + repeated string source_fqdn = 7; + // source_labels filters on a list of source label selectors. Selectors + // support the full Kubernetes label selector syntax. + repeated string source_label = 10; + // source_service filters on a list of source service names. This field + // supports the same syntax as the source_pod field. + repeated string source_service = 16; + // source_workload filters by a list of source workload. + repeated Workload source_workload = 26; + + // destination_ip filters by a list of destination ips. Each of the + // destination ips can be specified as an exact match (e.g. "1.1.1.1") or + // as a CIDR range (e.g. "1.1.1.0/24"). 
+ repeated string destination_ip = 3; + // destination_pod filters by a list of destination pod names + repeated string destination_pod = 4; + // destination_fqdn filters by a list of destination fully qualified domain names + repeated string destination_fqdn = 8; + // destination_label filters on a list of destination label selectors + repeated string destination_label = 11; + // destination_service filters on a list of destination service names + repeated string destination_service = 17; + // destination_workload filters by a list of destination workload. + repeated Workload destination_workload = 27; + + // traffic_direction filters flow by direction of the connection, e.g. + // ingress or egress. + repeated TrafficDirection traffic_direction = 30; + + // only return Flows that were classified with a particular verdict. + repeated Verdict verdict = 5; + // event_type is the list of event types to filter on + repeated EventTypeFilter event_type = 6; + // http_status_code is a list of string prefixes (e.g. "4+", "404", "5+") + // to filter on the HTTP status code + repeated string http_status_code = 9; + + // protocol filters flows by L4 or L7 protocol, e.g. (e.g. "tcp", "http") + repeated string protocol = 12; + + // source_port filters flows by L4 source port + repeated string source_port = 13; + // destination_port filters flows by L4 destination port + repeated string destination_port = 14; + // reply filters flows based on the direction of the flow. + repeated bool reply = 15; + // dns_query filters L7 DNS flows by query patterns (RE2 regex), e.g. 'kube.*local'. + repeated string dns_query = 18; + // source_identity filters by the security identity of the source endpoint. + repeated uint32 source_identity = 19; + // destination_identity filters by the security identity of the destination endpoint. + repeated uint32 destination_identity = 20; + + // GET, POST, PUT, etc. methods. 
This type of field is well suited for an + // enum but every single existing place is using a string already. + repeated string http_method = 21; + // http_path is a list of regular expressions to filter on the HTTP path. + repeated string http_path = 22; + // http_url is a list of regular expressions to filter on the HTTP URL. + repeated string http_url = 31; + // http_header is a list of key:value pairs to filter on the HTTP headers. + repeated HTTPHeader http_header = 32; + + // tcp_flags filters flows based on TCP header flags + repeated TCPFlags tcp_flags = 23; + + // node_name is a list of patterns to filter on the node name, e.g. "k8s*", + // "test-cluster/*.domain.com", "cluster-name/" etc. + repeated string node_name = 24; + + // filter based on IP version (ipv4 or ipv6) + repeated IPVersion ip_version = 25; + + // trace_id filters flows by trace ID + repeated string trace_id = 28; +} + +// EventType are constants are based on the ones from . +enum EventType { + UNKNOWN = 0; + // EventSample is equivalent to PERF_RECORD_SAMPLE. + EventSample = 9; + // RecordLost is equivalent to PERF_RECORD_LOST. + RecordLost = 2; +} + +// DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264): +message DNS { + // DNS name that's being looked up: e.g. "isovalent.com." + string query = 1; + // List of IP addresses in the DNS response. + repeated string ips = 2; + // TTL in the DNS response. + uint32 ttl = 3; + // List of CNames in the DNS response. 
+ repeated string cnames = 4; + // Corresponds to DNSDataSource defined in: + // https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L253 + string observation_source = 5; + // Return code of the DNS request defined in: + // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 + uint32 rcode = 6; + // String representation of qtypes defined in: + // https://tools.ietf.org/html/rfc1035#section-3.2.3 + repeated string qtypes = 7; + // String representation of rrtypes defined in: + // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4 + repeated string rrtypes = 8; +} + +message HTTPHeader { + string key = 1; + string value = 2; +} + +// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type. +message HTTP { + uint32 code = 1; + string method = 2; + string url = 3; + string protocol = 4; + repeated HTTPHeader headers = 5; +} + +// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type. +message Kafka { + int32 error_code = 1; + int32 api_version = 2; + string api_key = 3; + int32 correlation_id = 4; + string topic = 5; +} + +message Service { + string name = 1; + string namespace = 2; +} + +enum LostEventSource { + UNKNOWN_LOST_EVENT_SOURCE = 0; + // PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF + // perf event ring buffer, indicating that userspace agent did not keep up + // with the events produced by the datapath. + PERF_EVENT_RING_BUFFER = 1; + // OBSERVER_EVENTS_QUEUE indicates that events were dropped because the + // Hubble events queue was full, indicating that the Hubble observer did + // not keep up. 
+ OBSERVER_EVENTS_QUEUE = 2; + + // HUBBLE_RING_BUFFER indicates that the event was dropped because it could + // not be read from Hubble's ring buffer in time before being overwritten. + HUBBLE_RING_BUFFER = 3; +} + +// LostEvent is a message which notifies consumers about a loss of events +// that happened before the events were captured by Hubble. +message LostEvent { + // source is the location where events got lost. + LostEventSource source = 1; + // num_events_lost is the number of events that haven been lost at source. + uint64 num_events_lost = 2; + // cpu on which the event was lost if the source of lost events is + // PERF_EVENT_RING_BUFFER. + google.protobuf.Int32Value cpu = 3; +} + +// AgentEventType is the type of agent event. These values are shared with type +// AgentNotification in pkg/monitor/api/types.go. +enum AgentEventType { + AGENT_EVENT_UNKNOWN = 0; + // used for AGENT_EVENT_GENERIC in monitor API, but there are currently no + // such events; + reserved 1; + AGENT_STARTED = 2; + POLICY_UPDATED = 3; + POLICY_DELETED = 4; + ENDPOINT_REGENERATE_SUCCESS = 5; + ENDPOINT_REGENERATE_FAILURE = 6; + ENDPOINT_CREATED = 7; + ENDPOINT_DELETED = 8; + IPCACHE_UPSERTED = 9; + IPCACHE_DELETED = 10; + SERVICE_UPSERTED = 11; + SERVICE_DELETED = 12; +} + +message AgentEvent { + AgentEventType type = 1; + oneof notification { + AgentEventUnknown unknown = 100; + TimeNotification agent_start = 101; + // used for POLICY_UPDATED and POLICY_DELETED + PolicyUpdateNotification policy_update = 102; + // used for ENDPOINT_REGENERATE_SUCCESS and ENDPOINT_REGENERATE_FAILURE + EndpointRegenNotification endpoint_regenerate = 103; + // used for ENDPOINT_CREATED and ENDPOINT_DELETED + EndpointUpdateNotification endpoint_update = 104; + // used for IPCACHE_UPSERTED and IPCACHE_DELETED + IPCacheNotification ipcache_update = 105; + ServiceUpsertNotification service_upsert = 106; + ServiceDeleteNotification service_delete = 107; + } +} + +message AgentEventUnknown { + string type 
= 1; + string notification = 2; +} + +message TimeNotification { + google.protobuf.Timestamp time = 1; +} + +message PolicyUpdateNotification { + repeated string labels = 1; + uint64 revision = 2; + int64 rule_count = 3; +} + +message EndpointRegenNotification { + uint64 id = 1; + repeated string labels = 2; + string error = 3; +} + +message EndpointUpdateNotification { + uint64 id = 1; + repeated string labels = 2; + string error = 3; + string pod_name = 4; + string namespace = 5; +} + +message IPCacheNotification { + string cidr = 1; + uint32 identity = 2; + google.protobuf.UInt32Value old_identity = 3; + string host_ip = 4; + string old_host_ip = 5; + uint32 encrypt_key = 6; + string namespace = 7; + string pod_name = 8; +} + +message ServiceUpsertNotificationAddr { + string ip = 1; + uint32 port = 2; +} + +message ServiceUpsertNotification { + uint32 id = 1; + ServiceUpsertNotificationAddr frontend_address = 2; + repeated ServiceUpsertNotificationAddr backend_addresses = 3; + string type = 4; + string traffic_policy = 5 [deprecated = true]; + string name = 6; + string namespace = 7; + string ext_traffic_policy = 8; + string int_traffic_policy = 9; +} + +message ServiceDeleteNotification { + uint32 id = 1; +} + +message NetworkInterface { + uint32 index = 1; + string name = 2; +} + +// This mirrors enum xlate_point in bpf/lib/trace_sock.h +enum SocketTranslationPoint { + SOCK_XLATE_POINT_UNKNOWN = 0; + SOCK_XLATE_POINT_PRE_DIRECTION_FWD = 1; // Pre service translation + SOCK_XLATE_POINT_POST_DIRECTION_FWD = 2; // Post service translation + SOCK_XLATE_POINT_PRE_DIRECTION_REV = 3; // Pre reverse service translation + SOCK_XLATE_POINT_POST_DIRECTION_REV = 4; // Post reverse service translation +} + +message DebugEvent { + DebugEventType type = 1; + Endpoint source = 2; + google.protobuf.UInt32Value hash = 3; + google.protobuf.UInt32Value arg1 = 4; + google.protobuf.UInt32Value arg2 = 5; + google.protobuf.UInt32Value arg3 = 6; + string message = 7; + 
google.protobuf.Int32Value cpu = 8; +} + +// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. +enum DebugEventType { + DBG_EVENT_UNKNOWN = 0; + DBG_GENERIC = 1; + DBG_LOCAL_DELIVERY = 2; + DBG_ENCAP = 3; + DBG_LXC_FOUND = 4; + DBG_POLICY_DENIED = 5; + DBG_CT_LOOKUP = 6; + DBG_CT_LOOKUP_REV = 7; + DBG_CT_MATCH = 8; + DBG_CT_CREATED = 9; + DBG_CT_CREATED2 = 10; + DBG_ICMP6_HANDLE = 11; + DBG_ICMP6_REQUEST = 12; + DBG_ICMP6_NS = 13; + DBG_ICMP6_TIME_EXCEEDED = 14; + DBG_CT_VERDICT = 15; + DBG_DECAP = 16; + DBG_PORT_MAP = 17; + DBG_ERROR_RET = 18; + DBG_TO_HOST = 19; + DBG_TO_STACK = 20; + DBG_PKT_HASH = 21; + DBG_LB6_LOOKUP_FRONTEND = 22; + DBG_LB6_LOOKUP_FRONTEND_FAIL = 23; + DBG_LB6_LOOKUP_BACKEND_SLOT = 24; + DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS = 25; + DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL = 26; + DBG_LB6_LOOKUP_BACKEND_FAIL = 27; + DBG_LB6_REVERSE_NAT_LOOKUP = 28; + DBG_LB6_REVERSE_NAT = 29; + DBG_LB4_LOOKUP_FRONTEND = 30; + DBG_LB4_LOOKUP_FRONTEND_FAIL = 31; + DBG_LB4_LOOKUP_BACKEND_SLOT = 32; + DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS = 33; + DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL = 34; + DBG_LB4_LOOKUP_BACKEND_FAIL = 35; + DBG_LB4_REVERSE_NAT_LOOKUP = 36; + DBG_LB4_REVERSE_NAT = 37; + DBG_LB4_LOOPBACK_SNAT = 38; + DBG_LB4_LOOPBACK_SNAT_REV = 39; + DBG_CT_LOOKUP4 = 40; + DBG_RR_BACKEND_SLOT_SEL = 41; + DBG_REV_PROXY_LOOKUP = 42; + DBG_REV_PROXY_FOUND = 43; + DBG_REV_PROXY_UPDATE = 44; + DBG_L4_POLICY = 45; + DBG_NETDEV_IN_CLUSTER = 46; + DBG_NETDEV_ENCAP4 = 47; + DBG_CT_LOOKUP4_1 = 48; + DBG_CT_LOOKUP4_2 = 49; + DBG_CT_CREATED4 = 50; + DBG_CT_LOOKUP6_1 = 51; + DBG_CT_LOOKUP6_2 = 52; + DBG_CT_CREATED6 = 53; + DBG_SKIP_PROXY = 54; + DBG_L4_CREATE = 55; + DBG_IP_ID_MAP_FAILED4 = 56; + DBG_IP_ID_MAP_FAILED6 = 57; + DBG_IP_ID_MAP_SUCCEED4 = 58; + DBG_IP_ID_MAP_SUCCEED6 = 59; + DBG_LB_STALE_CT = 60; + DBG_INHERIT_IDENTITY = 61; + DBG_SK_LOOKUP4 = 62; + DBG_SK_LOOKUP6 = 63; + DBG_SK_ASSIGN = 64; + DBG_L7_LB = 65; + DBG_SKIP_POLICY = 66; +} diff 
--git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go new file mode 100644 index 0000000000..5093e85ef7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path +// +// swagger:model BgpFamily +type BgpFamily struct { + + // Address Family Indicator (AFI) of the path + Afi string `json:"afi,omitempty"` + + // Subsequent Address Family Indicator (SAFI) of the path + Safi string `json:"safi,omitempty"` +} + +// Validate validates this bgp family +func (m *BgpFamily) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp family based on context it is used +func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpFamily) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpFamily) UnmarshalBinary(b []byte) error { + var res BgpFamily + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go new file mode 100644 index 0000000000..a6455398f3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO 
NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpNlri Network Layer Reachability Information (NLRI) of the path +// +// swagger:model BgpNlri +type BgpNlri struct { + + // Base64-encoded NLRI in the BGP UPDATE message format + Base64 string `json:"base64,omitempty"` +} + +// Validate validates this bgp nlri +func (m *BgpNlri) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp nlri based on context it is used +func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpNlri) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpNlri) UnmarshalBinary(b []byte) error { + var res BgpNlri + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go new file mode 100644 index 0000000000..6a8f49befc --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go @@ -0,0 +1,220 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes +// +// swagger:model BgpPath +type BgpPath struct { + + // Age of the path (time since its creation) in nanoseconds + AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"` + + // True value flags the best path towards the destination prefix + Best bool `json:"best,omitempty"` + + // Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path + Family *BgpFamily `json:"family,omitempty"` + + // Network Layer Reachability Information of the path + Nlri *BgpNlri `json:"nlri,omitempty"` + + // List of BGP path attributes specific for the path + PathAttributes []*BgpPathAttribute `json:"path-attributes"` + + // True value marks the path as stale + Stale bool `json:"stale,omitempty"` +} + +// Validate validates this bgp path +func (m *BgpPath) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFamily(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNlri(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePathAttributes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BgpPath) validateFamily(formats strfmt.Registry) error { + if swag.IsZero(m.Family) { // not required + return nil + } + + if m.Family != nil { + if err := m.Family.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("family") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("family") + } + return err + } + } + + return nil +} + +func (m *BgpPath) validateNlri(formats strfmt.Registry) error { + if swag.IsZero(m.Nlri) { // not required + return nil + } + + if m.Nlri != nil { + if err := m.Nlri.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nlri") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nlri") + } + return err + } + } + + return nil +} + +func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error { + if swag.IsZero(m.PathAttributes) { // not required + return nil + } + + for i := 0; i < len(m.PathAttributes); i++ { + if swag.IsZero(m.PathAttributes[i]) { // not required + continue + } + + if m.PathAttributes[i] != nil { + if err := m.PathAttributes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("path-attributes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this bgp path based on the context it is used +func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateFamily(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateNlri(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePathAttributes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error { + + if m.Family != nil { + if err := m.Family.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("family") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("family") + } + return err + } + } + + return nil +} + +func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error { + + if m.Nlri != nil { + if err := m.Nlri.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nlri") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nlri") + } + return err + } + } + + return nil +} + +func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.PathAttributes); i++ { + + if m.PathAttributes[i] != nil { + if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("path-attributes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPath) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPath) UnmarshalBinary(b []byte) error { + var res BgpPath + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go new file mode 100644 index 0000000000..cd92929473 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpPathAttribute Single BGP path attribute specific for the path +// +// swagger:model BgpPathAttribute +type BgpPathAttribute struct { + + // Base64-encoded BGP path attribute in the BGP UPDATE message format + Base64 string `json:"base64,omitempty"` +} + +// Validate validates this bgp path attribute +func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp path attribute based on context it is used +func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error { + 
var res BgpPathAttribute + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go index 7b22ce0502..59a63154ca 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go @@ -78,6 +78,9 @@ type BgpPeer struct { // SessionState string `json:"session-state,omitempty"` + // Set when a TCP password is configured for communications with this peer + TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"` + // BGP peer connection uptime in nano seconds. UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go new file mode 100644 index 0000000000..0c301f815f --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go @@ -0,0 +1,128 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpRoute Single BGP route retrieved from the RIB of underlying router +// +// swagger:model BgpRoute +type BgpRoute struct { + + // IP address specifying a BGP neighbor if the source table type is adj-rib-in or adj-rib-out + Neighbor string `json:"neighbor,omitempty"` + + // List of routing paths leading towards the prefix + Paths []*BgpPath `json:"paths"` + + // IP prefix of the route + Prefix string `json:"prefix,omitempty"` + + // Autonomous System Number (ASN) identifying a BGP virtual router instance + RouterAsn int64 `json:"router-asn,omitempty"` +} + +// Validate validates this bgp route +func (m *BgpRoute) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePaths(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoute) validatePaths(formats strfmt.Registry) error { + if swag.IsZero(m.Paths) { // not required + return nil + } + + for i := 0; i < len(m.Paths); i++ { + if swag.IsZero(m.Paths[i]) { // not required + continue + } + + if m.Paths[i] != nil { + if err := m.Paths[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("paths" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("paths" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this bgp route based on the context it is used +func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePaths(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Paths); i++ { + + if m.Paths[i] != nil { + if err := m.Paths[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("paths" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("paths" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoute) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoute) UnmarshalBinary(b []byte) error { + var res BgpRoute + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go new file mode 100644 index 0000000000..d5d484d10c --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go @@ -0,0 +1,177 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicy Single BGP route policy retrieved from the underlying router +// +// swagger:model BgpRoutePolicy +type BgpRoutePolicy struct { + + // Name of the route policy + Name string `json:"name,omitempty"` + + // Autonomous System Number (ASN) identifying a BGP virtual router instance + RouterAsn int64 `json:"router-asn,omitempty"` + + // List of the route policy statements + Statements []*BgpRoutePolicyStatement `json:"statements"` + + // Type of the route policy + // Enum: [export import] + Type string `json:"type,omitempty"` +} + +// Validate validates this bgp route policy +func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatements(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error { + if swag.IsZero(m.Statements) { // not required + return nil + } + + for i := 0; i < len(m.Statements); i++ { + if swag.IsZero(m.Statements[i]) { // not required + continue + } + + if m.Statements[i] != nil { + if err := m.Statements[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["export","import"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, v) + } +} + +const ( + + // BgpRoutePolicyTypeExport captures enum value "export" + BgpRoutePolicyTypeExport string = "export" + + // BgpRoutePolicyTypeImport captures enum value "import" + BgpRoutePolicyTypeImport string = "import" +) + +// prop value enum +func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error { + if swag.IsZero(m.Type) { // not required + return nil + } + + // value enum + if err := m.validateTypeEnum("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy based on the context it is used +func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateStatements(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Statements); i++ { + + if m.Statements[i] != nil { + if err := m.Statements[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicy + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go new file mode 100644 index 0000000000..993f91dd6a --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy +// +// swagger:model BgpRoutePolicyPrefixMatch +type BgpRoutePolicyPrefixMatch struct { + + // CIDR prefix to match with + Cidr string `json:"cidr,omitempty"` + + // Maximal prefix length that will match if it falls under CIDR + PrefixLenMax int64 `json:"prefix-len-max,omitempty"` + + // Minimal prefix length that will match if it falls under CIDR + PrefixLenMin int64 `json:"prefix-len-min,omitempty"` +} + +// Validate validates this bgp route policy prefix match +func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp route policy prefix match based on context it is used +func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats 
strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyPrefixMatch + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go new file mode 100644 index 0000000000..09cdbd47e9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicyStatement Single BGP route policy statement +// +// swagger:model BgpRoutePolicyStatement +type BgpRoutePolicyStatement struct { + + // List of BGP standard community values to be added to the matched route + AddCommunities []string `json:"add-communities"` + + // List of BGP large community values to be added to the matched route + AddLargeCommunities []string `json:"add-large-communities"` + + // Matches any of the provided BGP neighbor IP addresses. If empty matches all neighbors. + MatchNeighbors []string `json:"match-neighbors"` + + // Matches any of the provided prefixes. If empty matches all prefixes. 
+ MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"` + + // RIB processing action taken on the matched route + // Enum: [none accept reject] + RouteAction string `json:"route-action,omitempty"` + + // BGP local preference value to be set on the matched route + SetLocalPreference int64 `json:"set-local-preference,omitempty"` +} + +// Validate validates this bgp route policy statement +func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMatchPrefixes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRouteAction(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error { + if swag.IsZero(m.MatchPrefixes) { // not required + return nil + } + + for i := 0; i < len(m.MatchPrefixes); i++ { + if swag.IsZero(m.MatchPrefixes[i]) { // not required + continue + } + + if m.MatchPrefixes[i] != nil { + if err := m.MatchPrefixes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, v) + } +} + +const ( + + // BgpRoutePolicyStatementRouteActionNone captures enum value "none" + BgpRoutePolicyStatementRouteActionNone string = "none" + + // BgpRoutePolicyStatementRouteActionAccept captures enum value "accept" + BgpRoutePolicyStatementRouteActionAccept string = "accept" + + // BgpRoutePolicyStatementRouteActionReject captures enum value "reject" + BgpRoutePolicyStatementRouteActionReject string = "reject" +) + +// prop value enum +func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error { + if swag.IsZero(m.RouteAction) { // not required + return nil + } + + // value enum + if err := m.validateRouteActionEnum("route-action", "body", m.RouteAction); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy statement based on the context it is used +func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatchPrefixes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.MatchPrefixes); i++ { + + if m.MatchPrefixes[i] != nil { + if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyStatement + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go index f55612181d..64daed2b0f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go @@ -53,6 +53,9 @@ type DaemonConfigurationStatus struct { // Immutable configuration (read-only) Immutable ConfigurationMap `json:"immutable,omitempty"` + // Comma-separated list of IP ports should be reserved in the workload network namespace + IPLocalReservedPorts string `json:"ipLocalReservedPorts,omitempty"` + // Configured IPAM mode IpamMode string `json:"ipam-mode,omitempty"` diff --git a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go index 128dd8dd63..db539d021d 100644 --- 
a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go @@ -336,7 +336,7 @@ func (m *DebugInfo) UnmarshalBinary(b []byte) error { // swagger:model DebugInfoEncryption type DebugInfoEncryption struct { - // Status of the Wireguard agent + // Status of the WireGuard agent Wireguard *WireguardStatus `json:"wireguard,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go index 4392fb0974..0cf25f3b8d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go @@ -25,14 +25,17 @@ import ( // swagger:model EncryptionStatus type EncryptionStatus struct { + // Status of the IPsec agent + Ipsec *IPsecStatus `json:"ipsec,omitempty"` + // mode // Enum: [Disabled IPsec Wireguard] Mode string `json:"mode,omitempty"` - // Human readable status/error/warning message + // Human readable error/warning message Msg string `json:"msg,omitempty"` - // Status of the Wireguard agent + // Status of the WireGuard agent Wireguard *WireguardStatus `json:"wireguard,omitempty"` } @@ -40,6 +43,10 @@ type EncryptionStatus struct { func (m *EncryptionStatus) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateIpsec(formats); err != nil { + res = append(res, err) + } + if err := m.validateMode(formats); err != nil { res = append(res, err) } @@ -54,6 +61,25 @@ func (m *EncryptionStatus) Validate(formats strfmt.Registry) error { return nil } +func (m *EncryptionStatus) validateIpsec(formats strfmt.Registry) error { + if swag.IsZero(m.Ipsec) { // not required + return nil + } + + if m.Ipsec != nil { + if err := m.Ipsec.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipsec") + } else if ce, ok := err.(*errors.CompositeError); ok { + return 
ce.ValidateName("ipsec") + } + return err + } + } + + return nil +} + var encryptionStatusTypeModePropEnum []interface{} func init() { @@ -122,6 +148,10 @@ func (m *EncryptionStatus) validateWireguard(formats strfmt.Registry) error { func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateIpsec(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateWireguard(ctx, formats); err != nil { res = append(res, err) } @@ -132,6 +162,22 @@ func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.R return nil } +func (m *EncryptionStatus) contextValidateIpsec(ctx context.Context, formats strfmt.Registry) error { + + if m.Ipsec != nil { + if err := m.Ipsec.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipsec") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("ipsec") + } + return err + } + } + + return nil +} + func (m *EncryptionStatus) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error { if m.Wireguard != nil { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go new file mode 100644 index 0000000000..795e79d6bb --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete. 
+// +// swagger:model EndpointBatchDeleteRequest +type EndpointBatchDeleteRequest struct { + + // ID assigned by container runtime + ContainerID string `json:"container-id,omitempty"` +} + +// Validate validates this endpoint batch delete request +func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this endpoint batch delete request based on context it is used +func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error { + var res EndpointBatchDeleteRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go index f70e9e43f5..d59e7f3a16 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go @@ -28,6 +28,9 @@ type EndpointChangeRequest struct { // ID assigned by container runtime ContainerID string `json:"container-id,omitempty"` + // Name of network device in container netns + ContainerInterfaceName string `json:"container-interface-name,omitempty"` + // Name assigned to container ContainerName string `json:"container-name,omitempty"` @@ -37,6 +40,9 @@ type EndpointChangeRequest struct { // ID of datapath tail call map DatapathMapID int64 `json:"datapath-map-id,omitempty"` + // Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint + DisableLegacyIdentifiers bool 
`json:"disable-legacy-identifiers,omitempty"` + // Docker endpoint ID DockerEndpointID string `json:"docker-endpoint-id,omitempty"` @@ -49,10 +55,10 @@ type EndpointChangeRequest struct { // Local endpoint ID ID int64 `json:"id,omitempty"` - // Index of network device + // Index of network device in host netns InterfaceIndex int64 `json:"interface-index,omitempty"` - // Name of network device + // Name of network device in host netns InterfaceName string `json:"interface-name,omitempty"` // Kubernetes namespace name @@ -61,6 +67,9 @@ type EndpointChangeRequest struct { // Kubernetes pod name K8sPodName string `json:"k8s-pod-name,omitempty"` + // Kubernetes pod UID + K8sUID string `json:"k8s-uid,omitempty"` + // Labels describing the identity Labels Labels `json:"labels,omitempty"` diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go index 09a26aa38d..380d26784a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go @@ -22,10 +22,13 @@ import ( // swagger:model EndpointIdentifiers type EndpointIdentifiers struct { - // ID assigned by container runtime + // ID assigned to this attachment by container runtime + CniAttachmentID string `json:"cni-attachment-id,omitempty"` + + // ID assigned by container runtime (deprecated, may not be unique) ContainerID string `json:"container-id,omitempty"` - // Name assigned to container + // Name assigned to container (deprecated, may not be unique) ContainerName string `json:"container-name,omitempty"` // Docker endpoint ID @@ -34,13 +37,13 @@ type EndpointIdentifiers struct { // Docker network ID DockerNetworkID string `json:"docker-network-id,omitempty"` - // K8s namespace for this endpoint + // K8s namespace for this endpoint (deprecated, may not be unique) K8sNamespace string `json:"k8s-namespace,omitempty"` - // K8s pod name 
for this endpoint + // K8s pod name for this endpoint (deprecated, may not be unique) K8sPodName string `json:"k8s-pod-name,omitempty"` - // K8s pod for this endpoint(Deprecated, use K8sPodName and K8sNamespace instead) + // K8s pod for this endpoint (deprecated, may not be unique) PodName string `json:"pod-name,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go index d322ca6390..893edd3019 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go @@ -25,16 +25,19 @@ type EndpointNetworking struct { // IP4/6 addresses assigned to this Endpoint Addressing []*AddressPair `json:"addressing"` + // Name of network device in container netns + ContainerInterfaceName string `json:"container-interface-name,omitempty"` + // host addressing HostAddressing *NodeAddressing `json:"host-addressing,omitempty"` // MAC address HostMac string `json:"host-mac,omitempty"` - // Index of network device + // Index of network device in host netns InterfaceIndex int64 `json:"interface-index,omitempty"` - // Name of network device + // Name of network device in host netns InterfaceName string `json:"interface-name,omitempty"` // MAC address diff --git a/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go b/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go new file mode 100644 index 0000000000..5eafc226f9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// IPsecStatus Status of the IPsec agent +// +// +k8s:deepcopy-gen=true +// +// swagger:model IPsecStatus +type IPsecStatus struct { + + // IPsec decryption interfaces + DecryptInterfaces []string `json:"decrypt-interfaces"` + + // IPsec error count + ErrorCount int64 `json:"error-count,omitempty"` + + // IPsec keys in use + KeysInUse int64 `json:"keys-in-use,omitempty"` + + // IPsec max sequence number + MaxSeqNumber string `json:"max-seq-number,omitempty"` + + // IPsec XFRM errors + XfrmErrors map[string]int64 `json:"xfrm-errors,omitempty"` +} + +// Validate validates this i psec status +func (m *IPsecStatus) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this i psec status based on context it is used +func (m *IPsecStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *IPsecStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IPsecStatus) UnmarshalBinary(b []byte) error { + var res IPsecStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go index d47308d560..c3e9561a55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go @@ -1134,13 +1134,17 @@ func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) type KubeProxyReplacementFeaturesNodePort struct { // acceleration - // Enum: [None Native Generic] + // 
Enum: [None Native Generic Best-Effort] Acceleration string `json:"acceleration,omitempty"` // algorithm // Enum: [Random Maglev] Algorithm string `json:"algorithm,omitempty"` + // dsr mode + // Enum: [IP Option/Extension IPIP Geneve] + DsrMode string `json:"dsrMode,omitempty"` + // enabled Enabled bool `json:"enabled,omitempty"` @@ -1170,6 +1174,10 @@ func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) res = append(res, err) } + if err := m.validateDsrMode(formats); err != nil { + res = append(res, err) + } + if err := m.validateMode(formats); err != nil { res = append(res, err) } @@ -1184,7 +1192,7 @@ var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["None","Native","Generic"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -1202,6 +1210,9 @@ const ( // KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic" KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic" + + // KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort" + KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort" ) // prop value enum @@ -1267,6 +1278,51 @@ func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt. 
return nil } +var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v) + } +} + +const ( + + // KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension" + KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension" + + // KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP" + KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP" + + // KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve" + KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve" +) + +// prop value enum +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error { + if swag.IsZero(m.DsrMode) { // not required + return nil + } + + // value enum + if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil { + return err + } + + return nil +} + var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{} func init() { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label.go b/vendor/github.com/cilium/cilium/api/v1/models/label.go new file mode 100644 index 0000000000..7e4225fee1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Label Label is the Cilium's representation of a container label +// +// swagger:model Label +type Label struct { + + // key + Key string `json:"key,omitempty"` + + // Source can be one of the above values (e.g. LabelSourceContainer) + Source string `json:"source,omitempty"` + + // value + Value string `json:"value,omitempty"` +} + +// Validate validates this label +func (m *Label) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this label based on context it is used +func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Label) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Label) UnmarshalBinary(b []byte) error { + var res Label + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label_array.go b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go new file mode 100644 index 0000000000..ca052c05f0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LabelArray LabelArray is an array of labels forming a set +// +// swagger:model LabelArray +type LabelArray []*Label + +// Validate validates this label array +func (m LabelArray) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this label array based on the context it is used +func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/module_health.go b/vendor/github.com/cilium/cilium/api/v1/models/module_health.go new file mode 100644 index 0000000000..7fd6ad30e1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/module_health.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ModuleHealth Report module health status +// +// swagger:model ModuleHealth +type ModuleHealth struct { + + // Time at which the last OK check occurred + LastOk string `json:"last-ok,omitempty"` + + // Time of last health update + LastUpdated string `json:"last-updated,omitempty"` + + // Describes the health status level + Level string `json:"level,omitempty"` + + // Reports the associated health message + Message string `json:"message,omitempty"` + + // Describes the module identitier + ModuleID string `json:"module-id,omitempty"` +} + +// Validate validates this module health +func (m *ModuleHealth) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this module health based on context it is used +func (m *ModuleHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ModuleHealth) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ModuleHealth) UnmarshalBinary(b []byte) error { + var res ModuleHealth + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go b/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go new file mode 100644 index 0000000000..de7d224a1b --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go @@ -0,0 +1,119 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ModulesHealth Reports health status of agent's modules +// +// swagger:model ModulesHealth +type ModulesHealth struct { + + // List out modules health status + Modules []*ModuleHealth `json:"modules"` +} + +// Validate validates this modules health +func (m *ModulesHealth) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateModules(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ModulesHealth) validateModules(formats strfmt.Registry) error { + if swag.IsZero(m.Modules) { // not required + return nil + } + + for i := 0; i < len(m.Modules); i++ { + if swag.IsZero(m.Modules[i]) { // not required + continue + } + + if m.Modules[i] != nil { + if err := m.Modules[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("modules" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("modules" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this modules health based on the context it is used +func (m *ModulesHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateModules(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ModulesHealth) contextValidateModules(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Modules); i++ { + + if m.Modules[i] != nil { + if err := m.Modules[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("modules" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("modules" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ModulesHealth) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ModulesHealth) UnmarshalBinary(b []byte) error { + var res ModulesHealth + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go index a3d06e6605..7f0ce2c6be 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go @@ -40,6 +40,9 @@ type NodeElement struct { // Alternative addresses assigned to the node SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"` + + // Source of the node configuration + Source string `json:"source,omitempty"` } // Validate validates this node element diff --git a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go index 400dfc3346..80aa38532d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go @@ -11,6 +11,7 @@ package models import ( "context" + "github.com/go-openapi/errors" 
"github.com/go-openapi/strfmt" "github.com/go-openapi/swag" ) @@ -23,6 +24,9 @@ type SelectorIdentityMapping struct { // identities mapping to this selector Identities []int64 `json:"identities"` + // Labels are the metadata labels associated with the selector + Labels LabelArray `json:"labels,omitempty"` + // string form of selector Selector string `json:"selector,omitempty"` @@ -32,11 +36,60 @@ type SelectorIdentityMapping struct { // Validate validates this selector identity mapping func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } -// ContextValidate validates this selector identity mapping based on context it is used +// ContextValidate validate this selector identity mapping based on the context it is used func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/srv6.go b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go new file mode 100644 index 0000000000..0e6531aedc --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Srv6 Status of the SRv6 +// +// +k8s:deepcopy-gen=true +// +// swagger:model Srv6 +type Srv6 struct { + + // enabled + Enabled bool `json:"enabled,omitempty"` + + // srv6 encap mode + // Enum: [SRH Reduced] + Srv6EncapMode string `json:"srv6EncapMode,omitempty"` +} + +// Validate validates this srv6 +func (m *Srv6) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSrv6EncapMode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var srv6TypeSrv6EncapModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v) + } +} + +const ( + + // Srv6Srv6EncapModeSRH captures enum value "SRH" + Srv6Srv6EncapModeSRH string = "SRH" + + // Srv6Srv6EncapModeReduced captures enum value "Reduced" + Srv6Srv6EncapModeReduced string = "Reduced" +) + +// prop value enum +func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6EncapMode) { // not required + return nil + } + + // value enum + if err := m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this srv6 based on context it is used +func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Srv6) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Srv6) UnmarshalBinary(b []byte) error { + var res Srv6 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go new file mode 100644 index 0000000000..df5ca5a801 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go @@ -0,0 +1,62 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StateDBQuery StateDB query +// +// swagger:model StateDBQuery +type StateDBQuery struct { + + // Index to query against + Index string `json:"index,omitempty"` + + // Key to query with. Base64 encoded. + Key string `json:"key,omitempty"` + + // LowerBound prefix search or full-matching Get + Lowerbound bool `json:"lowerbound,omitempty"` + + // Name of the table to query + Table string `json:"table,omitempty"` +} + +// Validate validates this state d b query +func (m *StateDBQuery) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this state d b query based on context it is used +func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StateDBQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StateDBQuery) UnmarshalBinary(b []byte) error { + var res StateDBQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go index 1073dd5276..52b18d92f3 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go @@ -24,6 +24,9 @@ import ( // swagger:model StatusResponse type StatusResponse struct { + // Status of Mutual Authentication certificate provider + AuthCertificateProvider *Status 
`json:"auth-certificate-provider,omitempty"` + // Status of bandwidth manager BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"` @@ -102,6 +105,9 @@ type StatusResponse struct { // Status of proxy Proxy *ProxyStatus `json:"proxy,omitempty"` + // Status of SRv6 + Srv6 *Srv6 `json:"srv6,omitempty"` + // List of stale information in the status Stale map[string]strfmt.DateTime `json:"stale,omitempty"` } @@ -110,6 +116,10 @@ type StatusResponse struct { func (m *StatusResponse) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateAuthCertificateProvider(formats); err != nil { + res = append(res, err) + } + if err := m.validateBandwidthManager(formats); err != nil { res = append(res, err) } @@ -206,6 +216,10 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateSrv6(formats); err != nil { + res = append(res, err) + } + if err := m.validateStale(formats); err != nil { res = append(res, err) } @@ -216,6 +230,25 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { return nil } +func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error { + if swag.IsZero(m.AuthCertificateProvider) { // not required + return nil + } + + if m.AuthCertificateProvider != nil { + if err := m.AuthCertificateProvider.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("auth-certificate-provider") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("auth-certificate-provider") + } + return err + } + } + + return nil +} + func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error { if swag.IsZero(m.BandwidthManager) { // not required return nil @@ -670,6 +703,25 @@ func (m *StatusResponse) validateProxy(formats strfmt.Registry) error { return nil } +func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6) 
{ // not required + return nil + } + + if m.Srv6 != nil { + if err := m.Srv6.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + func (m *StatusResponse) validateStale(formats strfmt.Registry) error { if swag.IsZero(m.Stale) { // not required return nil @@ -690,6 +742,10 @@ func (m *StatusResponse) validateStale(formats strfmt.Registry) error { func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateBandwidthManager(ctx, formats); err != nil { res = append(res, err) } @@ -786,12 +842,32 @@ func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Reg res = append(res, err) } + if err := m.contextValidateSrv6(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} return nil } +func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error { + + if m.AuthCertificateProvider != nil { + if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("auth-certificate-provider") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("auth-certificate-provider") + } + return err + } + } + + return nil +} + func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error { if m.BandwidthManager != nil { @@ -1174,6 +1250,22 @@ func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfm return nil } +func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error { + + if m.Srv6 != nil { + if err := m.Srv6.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *StatusResponse) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go index f73b640add..d78acc53d0 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go @@ -17,14 +17,14 @@ import ( "github.com/go-openapi/swag" ) -// WireguardInterface Status of a Wireguard interface +// WireguardInterface Status of a WireGuard interface // // +k8s:deepcopy-gen=true // // swagger:model WireguardInterface type WireguardInterface struct { - // Port on which the Wireguard endpoint is exposed + // Port on which the WireGuard endpoint is 
exposed ListenPort int64 `json:"listen-port,omitempty"` // Name of the interface @@ -33,7 +33,7 @@ type WireguardInterface struct { // Number of peers configured on this interface PeerCount int64 `json:"peer-count,omitempty"` - // Optional list of wireguard peers + // Optional list of WireGuard peers Peers []*WireguardPeer `json:"peers"` // Public key of this interface diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go index f1c7c12354..7d5664e2ef 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go @@ -17,7 +17,7 @@ import ( "github.com/go-openapi/validate" ) -// WireguardPeer Status of a Wireguard peer +// WireguardPeer Status of a WireGuard peer // // +k8s:deepcopy-gen=true // diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go index 98285fb5b1..041a2d3361 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go @@ -17,14 +17,14 @@ import ( "github.com/go-openapi/swag" ) -// WireguardStatus Status of the Wireguard agent +// WireguardStatus Status of the WireGuard agent // // +k8s:deepcopy-gen=true // // swagger:model WireguardStatus type WireguardStatus struct { - // Wireguard interfaces managed by this Cilium instance + // WireGuard interfaces managed by this Cilium instance Interfaces []*WireguardInterface `json:"interfaces"` // Node Encryption status diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go index 0c59fe6adf..9dcd6fd914 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go 
@@ -319,6 +319,11 @@ func (in *ControllerStatusStatus) DeepCopy() *ControllerStatusStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EncryptionStatus) DeepCopyInto(out *EncryptionStatus) { *out = *in + if in.Ipsec != nil { + in, out := &in.Ipsec, &out.Ipsec + *out = new(IPsecStatus) + (*in).DeepCopyInto(*out) + } if in.Wireguard != nil { in, out := &in.Wireguard, &out.Wireguard *out = new(WireguardStatus) @@ -528,6 +533,34 @@ func (in *IPV6BigTCP) DeepCopy() *IPV6BigTCP { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecStatus) DeepCopyInto(out *IPsecStatus) { + *out = *in + if in.DecryptInterfaces != nil { + in, out := &in.DecryptInterfaces, &out.DecryptInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.XfrmErrors != nil { + in, out := &in.XfrmErrors, &out.XfrmErrors + *out = make(map[string]int64, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecStatus. +func (in *IPsecStatus) DeepCopy() *IPsecStatus { + if in == nil { + return nil + } + out := new(IPsecStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IdentityRange) DeepCopyInto(out *IdentityRange) { *out = *in @@ -1214,9 +1247,30 @@ func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Srv6) DeepCopyInto(out *Srv6) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Srv6. 
+func (in *Srv6) DeepCopy() *Srv6 { + if in == nil { + return nil + } + out := new(Srv6) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = *in + if in.AuthCertificateProvider != nil { + in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider + *out = new(Status) + **out = **in + } if in.BandwidthManager != nil { in, out := &in.BandwidthManager, &out.BandwidthManager *out = new(BandwidthManager) @@ -1343,6 +1397,11 @@ func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = new(ProxyStatus) (*in).DeepCopyInto(*out) } + if in.Srv6 != nil { + in, out := &in.Srv6, &out.Srv6 + *out = new(Srv6) + **out = **in + } if in.Stale != nil { in, out := &in.Stale, &out.Stale *out = make(map[string]strfmt.DateTime, len(*in)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go index 3996ca8382..130475ae4b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go @@ -58,6 +58,9 @@ func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool { return false } + if in.CniAttachmentID != other.CniAttachmentID { + return false + } if in.ContainerID != other.ContainerID { return false } diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go index e0b6f9b070..b579758133 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go @@ -126,6 +126,10 @@ type ENI struct { Tags map[string]string `json:"tags,omitempty"` } +func (e *ENI) DeepCopyInterface() types.Interface { + return 
e.DeepCopy() +} + // InterfaceID returns the identifier of the interface func (e *ENI) InterfaceID() string { return e.NetworkInterfaceID @@ -174,6 +178,11 @@ type VPC struct { // // +optional IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"` + + // SecondaryCIDRs is the list of Secondary CIDRs associated with the VPC + // + // +optional + SecondaryCIDRs []string `json:"secondary-cidrs,omitempty"` } type VSwitch struct { diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go index ac246f5d18..309101578c 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ func (in *ENI) DeepCopyInto(out *ENI) { *out = make([]string, len(*in)) copy(*out, *in) } - out.VPC = in.VPC + in.VPC.DeepCopyInto(&out.VPC) out.VSwitch = in.VSwitch if in.PrivateIPSets != nil { in, out := &in.PrivateIPSets, &out.PrivateIPSets @@ -125,6 +125,11 @@ func (in *Spec) DeepCopy() *Spec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VPC) DeepCopyInto(out *VPC) { *out = *in + if in.SecondaryCIDRs != nil { + in, out := &in.SecondaryCIDRs, &out.SecondaryCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go index 04c8e81576..55fea351a1 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go @@ -44,7 +44,7 @@ func (in *ENI) DeepEqual(other *ENI) bool { } } - if in.VPC != other.VPC { + if !in.VPC.DeepEqual(&other.VPC) { return false } @@ -261,6 +261,22 @@ func (in *VPC) DeepEqual(other *VPC) bool { if in.IPv6CIDRBlock != other.IPv6CIDRBlock { return false } + if ((in.SecondaryCIDRs != nil) && (other.SecondaryCIDRs != nil)) || ((in.SecondaryCIDRs == nil) != (other.SecondaryCIDRs == nil)) { + in, other := &in.SecondaryCIDRs, &other.SecondaryCIDRs + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/api/apierror.go b/vendor/github.com/cilium/cilium/pkg/api/apierror.go index 83c44eeba1..7ee425a22f 100644 --- a/vendor/github.com/cilium/cilium/pkg/api/apierror.go +++ b/vendor/github.com/cilium/cilium/pkg/api/apierror.go @@ -30,6 +30,11 @@ func New(code int, msg string, args ...interface{}) *APIError { return &APIError{code: code, msg: msg} } +// GetCode returns the code for the API Error. +func (a *APIError) GetCode() int { + return a.code +} + // Error creates a new API error from the code and error. 
func Error(code int, err error) *APIError { if err == nil { diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go index aec96efcb2..c2cba6c384 100644 --- a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go @@ -208,6 +208,10 @@ type ENI struct { Tags map[string]string `json:"tags,omitempty"` } +func (e *ENI) DeepCopyInterface() types.Interface { + return e.DeepCopy() +} + // InterfaceID returns the identifier of the interface func (e *ENI) InterfaceID() string { return e.ID diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/types.go b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go index 0d42674e87..ba7419a24b 100644 --- a/vendor/github.com/cilium/cilium/pkg/azure/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go @@ -126,6 +126,10 @@ type AzureInterface struct { resourceGroup string `json:"-"` } +func (a *AzureInterface) DeepCopyInterface() types.Interface { + return a.DeepCopy() +} + // SetID sets the Azure interface ID, as well as extracting other fields from // the ID itself. func (a *AzureInterface) SetID(id string) { diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go index a7e974ff64..66c6e5cae4 100644 --- a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go @@ -72,7 +72,6 @@ func (in *CIDR) DeepCopyInto(out *CIDR) { *out = make(net.IPMask, len(*in)) copy(*out, *in) } - return } // AvailableIPs returns the number of IPs available in a CIDR @@ -89,7 +88,7 @@ func (n *CIDR) Equal(o *CIDR) bool { return Equal(n.IPNet, o.IPNet) } -// Equal returns true if the n and o net.IPNet CIDRs arr Equal. +// Equal returns true if the n and o net.IPNet CIDRs are Equal. 
func Equal(n, o *net.IPNet) bool { if n == nil || o == nil { return n == o @@ -101,6 +100,23 @@ func Equal(n, o *net.IPNet) bool { bytes.Equal(n.Mask, o.Mask) } +// ZeroNet generates a zero net.IPNet object for the given address family +func ZeroNet(family int) *net.IPNet { + switch family { + case FAMILY_V4: + return &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 8*net.IPv4len), + } + case FAMILY_V6: + return &net.IPNet{ + IP: net.IPv6zero, + Mask: net.CIDRMask(0, 8*net.IPv6len), + } + } + return nil +} + // ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2' func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool { for _, n := range ipNets2 { diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go new file mode 100644 index 0000000000..a43d9b46af --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cidr + +import "github.com/vishvananda/netlink/nl" + +// Family type definitions +const ( + FAMILY_ALL = nl.FAMILY_ALL + FAMILY_V4 = nl.FAMILY_V4 + FAMILY_V6 = nl.FAMILY_V6 + FAMILY_MPLS = nl.FAMILY_MPLS +) diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go new file mode 100644 index 0000000000..dfe393960f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package cidr + +// Dummy values on non-linux platform +const ( + FAMILY_V4 = iota + FAMILY_V6 +) diff --git a/vendor/github.com/cilium/cilium/pkg/client/client.go b/vendor/github.com/cilium/cilium/pkg/client/client.go index a1af5a888c..c24b9d9e60 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/client/client.go @@ -75,7 
+75,7 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { for { select { case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) + return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err) default: } @@ -88,7 +88,7 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { for { select { case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) + return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err) default: } // This is an API call that we do to the cilium-agent to check @@ -107,14 +107,42 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { // If host is nil then use SockPath provided by CILIUM_SOCK // or the cilium default SockPath func NewClient(host string) (*Client, error) { - clientTrans, err := NewRuntime(host) + clientTrans, err := NewRuntime(WithHost(host)) return &Client{*clientapi.New(clientTrans, strfmt.Default)}, err } -func NewRuntime(host string) (*runtime_client.Runtime, error) { +type runtimeOptions struct { + host string + basePath string +} + +func WithHost(host string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.host = host + } +} + +func WithBasePath(basePath string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.basePath = basePath + } +} + +func NewRuntime(opts ...func(options *runtimeOptions)) (*runtime_client.Runtime, error) { + r := runtimeOptions{} + for _, opt := range opts { + opt(&r) + } + + host := r.host if host == "" { host = DefaultSockPath() } + basePath := r.basePath + if basePath == "" { + basePath = clientapi.DefaultBasePath + } + tmp := strings.SplitN(host, "://", 2) if len(tmp) != 2 { return nil, 
fmt.Errorf("invalid host format '%s'", host) @@ -138,7 +166,7 @@ func NewRuntime(host string) (*runtime_client.Runtime, error) { transport := configureTransport(nil, tmp[0], host) httpClient := &http.Client{Transport: transport} - clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath, + clientTrans := runtime_client.NewWithClient(hostHeader, basePath, clientapi.DefaultSchemes, httpClient) return clientTrans, nil } @@ -228,9 +256,9 @@ func clusterReadiness(cluster *models.RemoteCluster) string { return "ready" } -func numReadyClusters(clustermesh *models.ClusterMeshStatus) int { +func NumReadyClusters(clusters []*models.RemoteCluster) int { numReady := 0 - for _, cluster := range clustermesh.Clusters { + for _, cluster := range clusters { if cluster.Ready { numReady++ } @@ -322,6 +350,20 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "\n") } + if sr.Srv6 != nil { + var fields []string + + status := "Disabled" + fields = append(fields, status) + + if sr.Srv6.Enabled { + fields[0] = "Enabled" + fields = append(fields, fmt.Sprintf("[encap-mode: %s]", sr.Srv6.Srv6EncapMode)) + } + + fmt.Fprintf(w, "SRv6:\t%s\n", strings.Join(fields, "\t")) + } + if sr.CniChaining != nil { fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode) } @@ -382,35 +424,15 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sr.ClusterMesh != nil { - fmt.Fprintf(w, "ClusterMesh:\t%d/%d clusters ready, %d global-services\n", - numReadyClusters(sr.ClusterMesh), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) - - for _, cluster := range sr.ClusterMesh.Clusters { - if sd.AllClusters || !cluster.Ready { - fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d failures (last: %s)\n", - cluster.Name, clusterReadiness(cluster), cluster.NumNodes, - cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices, - cluster.NumFailures, 
timeSince(time.Time(cluster.LastFailure))) - fmt.Fprintf(w, " └ %s\n", cluster.Status) - - fmt.Fprint(w, " └ remote configuration: ") - if cluster.Config != nil { - fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved) - if cluster.Config.Retrieved { - fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t", - cluster.Config.ClusterID, cluster.Config.Kvstoremesh, cluster.Config.SyncCanaries) - } - } else { - fmt.Fprint(w, "expected=unknown, retrieved=unknown") - } - fmt.Fprint(w, "\n") + fmt.Fprintf(w, "ClusterMesh:\t%d/%d remote clusters ready, %d global-services\n", + NumReadyClusters(sr.ClusterMesh.Clusters), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) - if cluster.Synced != nil { - fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v\n", - cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services) - } - } + verbosity := RemoteClustersStatusNotReadyOnly + if sd.AllClusters { + verbosity = RemoteClustersStatusVerbose } + + FormatStatusResponseRemoteClusters(w, sr.ClusterMesh.Clusters, verbosity) } if sr.IPV4BigTCP != nil { @@ -606,7 +628,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil { - var selection, mode, xdp string + var selection, mode, dsrMode, xdp string lb := "Disabled" cIP := "Enabled" @@ -618,6 +640,10 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } xdp = np.Acceleration mode = np.Mode + if mode == models.KubeProxyReplacementFeaturesNodePortModeDSR || + mode == models.KubeProxyReplacementFeaturesNodePortModeHybrid { + dsrMode = np.DsrMode + } nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax) lb = "Enabled" } @@ -684,6 +710,9 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai if mode 
!= "" { fmt.Fprintf(tab, " Mode:\t%s\n", mode) } + if dsrMode != "" { + fmt.Fprintf(tab, " DSR Dispatch Mode:\t%s\n", dsrMode) + } if selection != "" { fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection) } @@ -745,3 +774,49 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "Encryption:\t%s\t%s\n", sr.Encryption.Mode, strings.Join(fields, ", ")) } } + +// RemoteClustersStatusVerbosity specifies the verbosity when formatting the remote clusters status information. +type RemoteClustersStatusVerbosity uint + +const ( + // RemoteClustersStatusVerbose outputs all remote clusters information. + RemoteClustersStatusVerbose RemoteClustersStatusVerbosity = iota + // RemoteClustersStatusBrief outputs a one-line summary only for ready clusters. + RemoteClustersStatusBrief + // RemoteClustersStatusNotReadyOnly outputs the remote clusters information for non-ready clusters only. + RemoteClustersStatusNotReadyOnly +) + +func FormatStatusResponseRemoteClusters(w io.Writer, clusters []*models.RemoteCluster, verbosity RemoteClustersStatusVerbosity) { + for _, cluster := range clusters { + if verbosity != RemoteClustersStatusNotReadyOnly || !cluster.Ready { + fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d reconnections (last: %s)\n", + cluster.Name, clusterReadiness(cluster), cluster.NumNodes, + cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices, + cluster.NumFailures, timeSince(time.Time(cluster.LastFailure))) + + if verbosity == RemoteClustersStatusBrief && cluster.Ready { + continue + } + + fmt.Fprintf(w, " └ %s\n", cluster.Status) + + fmt.Fprint(w, " └ remote configuration: ") + if cluster.Config != nil { + fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved) + if cluster.Config.Retrieved { + fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t", + cluster.Config.ClusterID, cluster.Config.Kvstoremesh, 
cluster.Config.SyncCanaries) + } + } else { + fmt.Fprint(w, "expected=unknown, retrieved=unknown") + } + fmt.Fprint(w, "\n") + + if cluster.Synced != nil { + fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v\n", + cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services) + } + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go index 6fb289e451..d412cf91c6 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go @@ -20,6 +20,13 @@ func (c *Client) EndpointList() ([]*models.Endpoint, error) { return resp.Payload, nil } +// EndpointDeleteMany deletes multiple endpoints +func (c *Client) EndpointDeleteMany(req *models.EndpointBatchDeleteRequest) error { + params := endpoint.NewDeleteEndpointParams().WithEndpoint(req).WithTimeout(api.ClientTimeout) + _, _, err := c.Endpoint.DeleteEndpoint(params) + return Hint(err) +} + // EndpointGet returns endpoint by ID func (c *Client) EndpointGet(id string) (*models.Endpoint, error) { params := endpoint.NewGetEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout) diff --git a/vendor/github.com/cilium/cilium/pkg/client/policy.go b/vendor/github.com/cilium/cilium/pkg/client/policy.go index 5930c17666..f90cd86a7a 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/policy.go +++ b/vendor/github.com/cilium/cilium/pkg/client/policy.go @@ -19,6 +19,16 @@ func (c *Client) PolicyPut(policyJSON string) (*models.Policy, error) { return resp.Payload, nil } +// PolicyReplace replaces the `policyJSON` +func (c *Client) PolicyReplace(policyJSON string, replace bool, replaceWithLabels []string) (*models.Policy, error) { + params := policy.NewPutPolicyParams().WithPolicy(policyJSON).WithReplace(&replace).WithReplaceWithLabels(replaceWithLabels).WithTimeout(api.ClientTimeout) + resp, err := 
c.Policy.PutPolicy(params) + if err != nil { + return nil, Hint(err) + } + return resp.Payload, nil +} + // PolicyGet returns policy rules func (c *Client) PolicyGet(labels []string) (*models.Policy, error) { params := policy.NewGetPolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout) diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go index 176bee29b7..37135b8ed8 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go @@ -10,6 +10,8 @@ import ( "strconv" "strings" + "go4.org/netipx" + "github.com/cilium/cilium/pkg/cidr" ippkg "github.com/cilium/cilium/pkg/ip" ) @@ -221,7 +223,7 @@ func (ac AddrCluster) AsNetIP() net.IP { } func (ac AddrCluster) AsPrefixCluster() PrefixCluster { - return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), ac.clusterID) + return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), WithClusterID(ac.clusterID)) } // PrefixCluster is a type that holds a pair of prefix and ClusterID. 
@@ -292,14 +294,21 @@ func (pc PrefixCluster) IsSingleIP() bool { return pc.prefix.IsSingleIP() } -func PrefixClusterFrom(addr netip.Addr, bits int, clusterID uint32) PrefixCluster { - return PrefixCluster{ - prefix: netip.PrefixFrom(addr, bits), - clusterID: clusterID, +type PrefixClusterOpts func(*PrefixCluster) + +func WithClusterID(id uint32) PrefixClusterOpts { + return func(pc *PrefixCluster) { pc.clusterID = id } +} + +func PrefixClusterFrom(addr netip.Addr, bits int, opts ...PrefixClusterOpts) PrefixCluster { + pc := PrefixCluster{prefix: netip.PrefixFrom(addr, bits)} + for _, opt := range opts { + opt(&pc) } + return pc } -func PrefixClusterFromCIDR(c *cidr.CIDR, clusterID uint32) PrefixCluster { +func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster { if c == nil { return PrefixCluster{} } @@ -310,10 +319,7 @@ func PrefixClusterFromCIDR(c *cidr.CIDR, clusterID uint32) PrefixCluster { } ones, _ := c.Mask.Size() - return PrefixCluster{ - prefix: netip.PrefixFrom(addr, ones), - clusterID: clusterID, - } + return PrefixClusterFrom(addr, ones, opts...) } func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool { @@ -339,17 +345,22 @@ func (pc PrefixCluster) String() string { return pc.prefix.String() + "@" + strconv.FormatUint(uint64(pc.clusterID), 10) } +// AsPrefix returns the IP prefix part of PrefixCluster as a netip.Prefix type. +// This function exists for keeping backward compatibility between the existing +// components which are not aware of the cluster-aware addressing. Calling +// this function against the PrefixCluster which has non-zero clusterID will +// lose the ClusterID information. It should be used with an extra care. +func (pc PrefixCluster) AsPrefix() netip.Prefix { + return netip.PrefixFrom(pc.prefix.Addr(), pc.prefix.Bits()) +} + // AsIPNet returns the IP prefix part of PrefixCluster as a net.IPNet type. 
This // function exists for keeping backward compatibility between the existing // components which are not aware of the cluster-aware addressing. Calling // this function against the PrefixCluster which has non-zero clusterID will // lose the ClusterID information. It should be used with an extra care. func (pc PrefixCluster) AsIPNet() net.IPNet { - addr := pc.prefix.Addr() - return net.IPNet{ - IP: addr.AsSlice(), - Mask: net.CIDRMask(pc.prefix.Bits(), addr.BitLen()), - } + return *netipx.PrefixIPNet(pc.AsPrefix()) } // This function is solely exists for annotating IPCache's key string with ClusterID. diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go new file mode 100644 index 0000000000..0ec16ac989 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "errors" + "fmt" + + "github.com/spf13/pflag" + + "github.com/cilium/cilium/pkg/defaults" +) + +const ( + // OptClusterName is the name of the OptClusterName option + OptClusterName = "cluster-name" + + // OptClusterID is the name of the OptClusterID option + OptClusterID = "cluster-id" + + // OptMaxConnectedClusters is the name of the OptMaxConnectedClusters option + OptMaxConnectedClusters = "max-connected-clusters" +) + +// ClusterInfo groups together the ClusterID and the ClusterName +type ClusterInfo struct { + ID uint32 `mapstructure:"cluster-id"` + Name string `mapstructure:"cluster-name"` + MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"` +} + +// DefaultClusterInfo represents the default ClusterInfo values. +var DefaultClusterInfo = ClusterInfo{ + ID: 0, + Name: defaults.ClusterName, + MaxConnectedClusters: defaults.MaxConnectedClusters, +} + +// Flags implements the cell.Flagger interface, to register the given flags. 
+func (def ClusterInfo) Flags(flags *pflag.FlagSet) { + flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster") + flags.String(OptClusterName, def.Name, "Name of the cluster") + flags.Uint32(OptMaxConnectedClusters, def.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].") +} + +// Validate validates that the ClusterID is in the valid range (including ClusterID == 0), +// and that the ClusterName is different from the default value if the ClusterID != 0. +func (c ClusterInfo) Validate() error { + if c.ID < ClusterIDMin || c.ID > ClusterIDMax { + return fmt.Errorf("invalid cluster id %d: must be in range %d..%d", + c.ID, ClusterIDMin, ClusterIDMax) + } + + return c.validateName() +} + +// ValidateStrict validates that the ClusterID is in the valid range, but not 0, +// and that the ClusterName is different from the default value. +func (c ClusterInfo) ValidateStrict() error { + if err := ValidateClusterID(c.ID); err != nil { + return err + } + + return c.validateName() +} + +func (c ClusterInfo) validateName() error { + if c.ID != 0 && c.Name == defaults.ClusterName { + return fmt.Errorf("cannot use default cluster name (%s) with option %s", + defaults.ClusterName, OptClusterID) + } + + return nil +} + +// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has +// been set to a value larger than the default 255. +func (c ClusterInfo) ExtendedClusterMeshEnabled() bool { + return c.MaxConnectedClusters != defaults.MaxConnectedClusters +} + +// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure +// compatibility with this cluster's configuration. When configRequired is +// false, a missing configuration or one with ID=0 is allowed for backward +// compatibility, otherwise it is flagged as an error. 
+func (c ClusterInfo) ValidateRemoteConfig(configRequired bool, config *CiliumClusterConfig) error { + if config == nil || config.ID == 0 { + if configRequired || c.ExtendedClusterMeshEnabled() { + return errors.New("remote cluster is missing cluster configuration") + } + + return nil + } + + if err := ValidateClusterID(config.ID); err != nil { + return err + } + + if c.ExtendedClusterMeshEnabled() && (c.MaxConnectedClusters != config.Capabilities.MaxConnectedClusters) { + return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters) + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go index e92f269fd7..8137467fe4 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go @@ -5,16 +5,32 @@ package types import ( "fmt" + + "github.com/cilium/cilium/pkg/defaults" ) const ( // ClusterIDMin is the minimum value of the cluster ID - ClusterIDMin = 0 - - // ClusterIDMax is the maximum value of the cluster ID - ClusterIDMax = 255 + ClusterIDMin = 0 + ClusterIDExt511 = 511 ) +// ClusterIDMax is the maximum value of the cluster ID +var ClusterIDMax uint32 = defaults.MaxConnectedClusters + +// InitClusterIDMax validates and sets the ClusterIDMax package level variable. +func (c ClusterInfo) InitClusterIDMax() error { + switch c.MaxConnectedClusters { + case defaults.MaxConnectedClusters, ClusterIDExt511: + ClusterIDMax = c.MaxConnectedClusters + default: + return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511) + } + return nil +} + +// ValidateClusterID ensures that the given clusterID is within the configured +// range of the ClusterMesh. 
func ValidateClusterID(clusterID uint32) error { if clusterID == ClusterIDMin { return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin) @@ -41,30 +57,16 @@ type CiliumClusterConfigCapabilities struct { // kvstore (for instance, by kvstoremesh). This implies that keys are stored // under the dedicated "cilium/cache" prefix, and all are cluster-scoped. Cached bool `json:"cached,omitempty"` -} -func (c *CiliumClusterConfig) Validate() error { - if c == nil || c.ID == 0 { - // When remote cluster doesn't have cluster config, we - // currently just bypass the validation for compatibility. - // Otherwise, we cannot connect with older cluster which - // doesn't support cluster config feature. - // - // When we introduce a new cluster config can't be ignored, - // we should properly check it here and return error. Now - // we only have ClusterID which used to be ignored. - return nil - } - - if err := ValidateClusterID(c.ID); err != nil { - return err - } - - return nil + // The maximum number of clusters the given cluster can support in a ClusterMesh. + MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"` } -// ClusterIDName groups together the ClusterID and the ClusterName -type ClusterIDName struct { - ClusterID uint32 - ClusterName string -} +// ValidationMode defines if a missing CiliumClusterConfig should be allowed for +// backward compatibility, or it should be flagged as an error. +type ValidationMode bool + +const ( + BackwardCompatible ValidationMode = false + Strict ValidationMode = true +) diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go new file mode 100644 index 0000000000..959e903730 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package exec provides useful wrappers around the standard "exec" library. 
+package exec diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go new file mode 100644 index 0000000000..1762e5dbba --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package exec + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "os/exec" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/time" +) + +func warnToLog(cmd *exec.Cmd, out []byte, scopedLog *logrus.Entry, err error) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + scopedLog.Warn(scanner.Text()) + } +} + +// combinedOutput is the core implementation of catching deadline exceeded +// options and logging errors. +func combinedOutput(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.CombinedOutput() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil && verbose { + warnToLog(cmd, out, scopedLog, err) + } + return out, err +} + +// output is the equivalent to combinedOutput with only capturing stdout +func output(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.Output() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + err = fmt.Errorf("%w stderr=%q", 
exitErr, exitErr.Stderr) + } + if verbose { + warnToLog(cmd, out, scopedLog, err) + } + } + return out, err +} + +// Cmd wraps exec.Cmd with a context to provide convenient execution of a +// command with nice checking of the context timeout in the form: +// +// err := exec.Prog().WithTimeout(5*time.Second, myprog, myargs...).CombinedOutput(log, verbose) +type Cmd struct { + *exec.Cmd + ctx context.Context + cancelFn func() +} + +// CommandContext wraps exec.CommandContext to allow this package to be used as +// a drop-in replacement for the standard exec library. +func CommandContext(ctx context.Context, prog string, args ...string) *Cmd { + return &Cmd{ + Cmd: exec.CommandContext(ctx, prog, args...), + ctx: ctx, + } +} + +// WithTimeout creates a Cmd with a context that times out after the specified +// duration. +func WithTimeout(timeout time.Duration, prog string, args ...string) *Cmd { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + cmd := CommandContext(ctx, prog, args...) + cmd.cancelFn = cancel + return cmd +} + +// WithCancel creates a Cmd with a context that can be cancelled by calling the +// resulting Cancel() function. +func WithCancel(ctx context.Context, prog string, args ...string) (*Cmd, context.CancelFunc) { + newCtx, cancel := context.WithCancel(ctx) + cmd := CommandContext(newCtx, prog, args...) + return cmd, cancel +} + +// CombinedOutput runs the command and returns its combined standard output and +// standard error. Unlike the standard library, if the context is exceeded, it +// will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) CombinedOutput(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := combinedOutput(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} + +// Output runs the command and returns only standard output, but not the +// standard error. 
Unlike the standard library, if the context is exceeded, +// it will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) Output(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := output(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} diff --git a/vendor/github.com/cilium/cilium/pkg/command/output.go b/vendor/github.com/cilium/cilium/pkg/command/output.go index a3d0490df5..f6196048c5 100644 --- a/vendor/github.com/cilium/cilium/pkg/command/output.go +++ b/vendor/github.com/cilium/cilium/pkg/command/output.go @@ -58,7 +58,7 @@ func PrintOutput(data interface{}) error { func PrintOutputWithPatch(data interface{}, patch interface{}) error { mergedInterface, err := mergeInterfaces(data, patch) if err != nil { - return fmt.Errorf("Unable to merge Interfaces:%v", err) + return fmt.Errorf("Unable to merge Interfaces: %w", err) } return PrintOutputWithType(mergedInterface, outputOpt) } diff --git a/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go index c9ff7746d2..d31b0194da 100644 --- a/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go +++ b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go @@ -102,8 +102,5 @@ func MapStringEqualsIgnoreKeys(m1, m2 map[string]string, ignoreKeys []string) bo ignoredM2++ } } - if len(m1)-ignoredM1 != len(m2)-ignoredM2 { - return false - } - return true + return len(m1)-ignoredM1 == len(m2)-ignoredM2 } diff --git a/vendor/github.com/cilium/cilium/pkg/components/components.go b/vendor/github.com/cilium/cilium/pkg/components/components.go deleted file mode 100644 index 38f483a07e..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/components/components.go +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package components - -import ( - "os" - "strings" -) - -const ( - 
// CiliumAgentName is the name of cilium-agent (daemon) process name. - CiliumAgentName = "cilium-agent" - // CiliumOperatortName is the name of cilium-operator process name. - CiliumOperatortName = "cilium-operator" - // CiliumDaemonTestName is the name of test binary for daemon package. - CiliumDaemonTestName = "cmd.test" -) - -// IsCiliumAgent checks whether the current process is cilium-agent (daemon). -func IsCiliumAgent() bool { - binaryName := os.Args[0] - return strings.HasSuffix(binaryName, CiliumAgentName) || - strings.HasSuffix(binaryName, CiliumDaemonTestName) -} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go new file mode 100644 index 0000000000..33ca884264 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/link" + "golang.org/x/sys/unix" +) + +// HaveAttachCgroup returns nil if the kernel is compiled with +// CONFIG_CGROUP_BPF. +// +// It's only an approximation and doesn't execute a successful cgroup attachment +// under the hood. If any unexpected errors are encountered, the original error +// is returned. +func HaveAttachCgroup() error { + attachCgroupOnce.Do(func() { + attachCgroupResult = haveAttachCgroup() + }) + + return attachCgroupResult +} + +func haveAttachCgroup() error { + // Load known-good program supported by the earliest kernels with cgroup + // support. 
+ spec := &ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + } + + p, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err != nil { + return fmt.Errorf("create cgroup program: %w: %w", err, ebpf.ErrNotSupported) + } + defer p.Close() + + // Attaching to a non-cgroup node should result in EBADF when creating the + // link, compared to EINVAL if the kernel does not support or was compiled + // without CONFIG_CGROUP_BPF. + _, err = link.AttachCgroup(link.CgroupOptions{Path: "/dev/null", Program: p, Attach: spec.AttachType}) + if errors.Is(err, unix.EBADF) { + // The kernel checked the given file descriptor from within the cgroup prog + // attach handler. Assume it supports attaching cgroup progs. + return nil + } + if err != nil { + // Preserve the original error in the error string. Needs Go 1.20. + return fmt.Errorf("link cgroup program to /dev/null: %w: %w", err, ebpf.ErrNotSupported) + } + + return errors.New("attaching prog to /dev/null did not result in error") +} + +var attachCgroupOnce sync.Once +var attachCgroupResult error diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go new file mode 100644 index 0000000000..5ce0c0aa9a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + + "golang.org/x/sys/unix" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + + "github.com/cilium/cilium/pkg/lock" +) + +// HaveAttachType returns nil if the given program/attach type combination is +// supported by the underlying kernel. 
Returns ebpf.ErrNotSupported if loading a +// program with the given Program/AttachType fails. If the probe is inconclusive +// due to an unrecognized return code, the original error is returned. +// +// Note that program types that don't use attach types will silently succeed if +// an attach type is specified. +// +// Probe results are cached by the package and shouldn't be memoized by the +// caller. +func HaveAttachType(pt ebpf.ProgramType, at ebpf.AttachType) (err error) { + if err := features.HaveProgramType(pt); err != nil { + return err + } + + attachProbesMu.Lock() + defer attachProbesMu.Unlock() + if err, ok := attachProbes[attachProbe{pt, at}]; ok { + return err + } + + defer func() { + // Closes over named return variable err to cache any returned errors. + attachProbes[attachProbe{pt, at}] = err + }() + + spec := &ebpf.ProgramSpec{ + Type: pt, + AttachType: at, + Instructions: asm.Instructions{ + // recvmsg and peername require a return value of 1, use it for all probes. + asm.LoadImm(asm.R0, 1, asm.DWord), + asm.Return(), + }, + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + // EINVAL occurs when attempting to create a program with an unknown type. + // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. 
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.E2BIG) { + err = ebpf.ErrNotSupported + } + if err != nil { + return err + } + + return nil +} + +type attachProbe struct { + pt ebpf.ProgramType + at ebpf.AttachType +} + +var attachProbesMu lock.Mutex +var attachProbes map[attachProbe]error = make(map[attachProbe]error) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go new file mode 100644 index 0000000000..285c8851d5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package probes provides BPF features checks based on bpftool. +package probes diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go new file mode 100644 index 0000000000..c815eb729e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "time" +) + +// Available CONFIG_HZ values, sorted from highest to lowest. +var hzValues = []uint16{1000, 300, 250, 100} + +// KernelHZ attempts to estimate the kernel's CONFIG_HZ compile-time value by +// making snapshots of the kernel timestamp with a time interval in between. +// +// Blocks for at least 100ms while the measurement is in progress. Can block +// significantly longer under some hypervisors like VirtualBox due to buggy +// clocks, interrupt coalescing and low timer resolution. 
+func KernelHZ() (uint16, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + // Measure the kernel timestamp at least 100ms apart, giving kernel timer and + // wall clock ample opportunity to advance for adequate sample size. + j1, err := readSchedstat(f) + if err != nil { + return 0, err + } + + // On some platforms, this can put the goroutine to sleep for significantly + // longer than 100ms. Do not rely on readings being anywhere near 100ms apart. + time.Sleep(time.Millisecond * 100) + + j2, err := readSchedstat(f) + if err != nil { + return 0, err + } + + hz, err := j1.interpolate(j2) + if err != nil { + return 0, fmt.Errorf("interpolating hz value: %w", err) + } + + return nearest(hz, hzValues) +} + +// Jiffies returns the kernel's internal timestamp in jiffies read from +// /proc/schedstat. +func Jiffies() (uint64, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + k, err := readSchedstat(f) + if err != nil { + return 0, err + } + + return k.k, nil +} + +// readSchedstat expects to read /proc/schedstat and returns the first line +// matching 'timestamp %d'. Upon return, f is rewound to allow reuse. +// +// Should not be called concurrently. +func readSchedstat(f io.ReadSeeker) (ktime, error) { + // Rewind the file when done so the next call gets fresh data. + defer func() { _, _ = f.Seek(0, 0) }() + + var j uint64 + var t = time.Now() + + s := bufio.NewScanner(f) + for s.Scan() { + if _, err := fmt.Sscanf(s.Text(), "timestamp %d", &j); err == nil { + return ktime{j, t}, nil + } + } + + return ktime{}, errors.New("no kernel timestamp found") +} + +type ktime struct { + k uint64 + t time.Time +} + +// interpolate returns the amount of jiffies (ktime) that would have elapsed if +// both ktimes were measured exactly 1 second apart. 
Using linear interpolation, +// the delta between both kernel timestamps is adjusted based on the elapsed +// wall time between both measurements. +func (old ktime) interpolate(new ktime) (uint16, error) { + if old.t.After(new.t) { + return 0, fmt.Errorf("old wall time %v is more recent than %v", old.t, new.t) + } + if old.k > new.k { + return 0, fmt.Errorf("old kernel timer %d is higher than %d", old.k, new.k) + } + + // Jiffy and duration delta. + kd := new.k - old.k + td := new.t.Sub(old.t) + + // Linear interpolation to represent elapsed jiffies as a per-second value. + hz := float64(kd) / td.Seconds() + hz = math.Round(hz) + if hz > math.MaxUint16 { + return 0, fmt.Errorf("interpolated hz value would overflow uint16: %f", hz) + } + + return uint16(hz), nil +} + +// nearest returns the entry from values that's closest to in. If in has an +// equal distance to multiple values, the value that appears the earliest in +// values wins. Returns error if values is empty. +func nearest(in uint16, values []uint16) (uint16, error) { + if len(values) == 0 { + return 0, errors.New("values cannot be empty") + } + + var out uint16 + min := ^uint16(0) + for _, v := range values { + // Get absolute distance between in and v. + d := uint16(in - v) + if in < v { + d = v - in + } + + // Check if the distance to the current number is smaller than to the + // previous number. 
+ if d < min { + min = d + out = v + } + } + + return out, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go new file mode 100644 index 0000000000..2d8196d535 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "net" + "runtime" + "sync" + + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +var ( + managedNeighborOnce sync.Once + managedNeighborResult error +) + +// HaveManagedNeighbors returns nil if the host supports managed neighbor entries (NTF_EXT_MANAGED). +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveManagedNeighbors() error { + managedNeighborOnce.Do(func() { + ch := make(chan struct{}) + + // In order to call haveManagedNeighbors safely, it has to be started + // in a goroutine, so we can make sure the goroutine ends when the function exits. + // This makes sure the underlying OS thread exits if we fail to restore it to the original netns. + go func() { + managedNeighborResult = haveManagedNeighbors() + close(ch) + }() + <-ch // wait for probe to finish + + // if we encounter a different error than ErrNotSupported, terminate the agent. 
+ if managedNeighborResult != nil && !errors.Is(managedNeighborResult, ErrNotSupported) { + log.WithError(managedNeighborResult).Fatal("failed to probe managed neighbor support") + } + }) + + return managedNeighborResult +} + +func haveManagedNeighbors() (outer error) { + runtime.LockOSThread() + oldns, err := netns.Get() + if err != nil { + return fmt.Errorf("failed to get current netns: %w", err) + } + defer oldns.Close() + + newns, err := netns.New() + if err != nil { + return fmt.Errorf("failed to create new netns: %w", err) + } + defer newns.Close() + defer func() { + // defer closes over named return variable err + if nerr := netns.Set(oldns); nerr != nil { + // The current goroutine is locked to an OS thread and we've failed + // to undo state modifications to the thread. Returning without unlocking + // the goroutine will make sure the underlying OS thread dies. + outer = fmt.Errorf("error setting thread back to its original netns: %w (original error: %w)", nerr, outer) + return + } + // only now that we have successfully changed the thread back to its + // original state (netns) we can safely unlock the goroutine from its OS thread. + runtime.UnlockOSThread() + }() + + // Use a veth device instead of a dummy to avoid the kernel having to modprobe + // the dummy kmod, which could potentially be compiled out. veth is currently + // a hard dependency for Cilium, so safe to assume the module is available if + // not already loaded. 
+ veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "veth0"}, + PeerName: "veth1", + } + + if err := netlink.LinkAdd(veth); err != nil { + return fmt.Errorf("failed to add dummy veth: %w", err) + } + + neigh := netlink.Neigh{ + LinkIndex: veth.Index, + IP: net.IPv4(0, 0, 0, 1), + Flags: NTF_EXT_LEARNED, + FlagsExt: NTF_EXT_MANAGED, + } + + if err := netlink.NeighAdd(&neigh); err != nil { + return fmt.Errorf("failed to add neighbor: %w", err) + } + + nl, err := netlink.NeighList(veth.Index, 0) + if err != nil { + return fmt.Errorf("failed to list neighbors: %w", err) + } + + for _, n := range nl { + if !n.IP.Equal(neigh.IP) { + continue + } + if n.Flags != NTF_EXT_LEARNED { + continue + } + if n.FlagsExt != NTF_EXT_MANAGED { + continue + } + + return nil + } + + return ErrNotSupported +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go new file mode 100644 index 0000000000..0bc687aa97 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "sync" + "text/template" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/command/exec" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "probes") + once sync.Once + probeManager *ProbeManager + tpl = template.New("headerfile") +) + +func init() { + const content = ` +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright 
Authors of Cilium */ + +/* THIS FILE WAS GENERATED DURING AGENT STARTUP. */ + +#pragma once + +{{- if not .Common}} +#include "features.h" +{{- end}} + +{{- range $key, $value := .Features}} +{{- if $value}} +#define {{$key}} 1 +{{end}} +{{- end}} +` + var err error + tpl, err = tpl.Parse(content) + if err != nil { + log.WithError(err).Fatal("could not parse headerfile template") + } +} + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// KernelParam is a type based on string which represents CONFIG_* kernel +// parameters which usually have values "y", "n" or "m". +type KernelParam string + +// Enabled checks whether the kernel parameter is enabled. +func (kp KernelParam) Enabled() bool { + return kp == "y" +} + +// Module checks whether the kernel parameter is enabled as a module. +func (kp KernelParam) Module() bool { + return kp == "m" +} + +// kernelOption holds information about kernel parameters to probe. +type kernelOption struct { + Description string + Enabled bool + CanBeModule bool +} + +type ProgramHelper struct { + Program ebpf.ProgramType + Helper asm.BuiltinFunc +} + +type miscFeatures struct { + HaveLargeInsnLimit bool + HaveFibIfindex bool +} + +type FeatureProbes struct { + ProgramHelpers map[ProgramHelper]bool + Misc miscFeatures +} + +// SystemConfig contains kernel configuration and sysctl parameters related to +// BPF functionality. 
+type SystemConfig struct { + UnprivilegedBpfDisabled int `json:"unprivileged_bpf_disabled"` + BpfJitEnable int `json:"bpf_jit_enable"` + BpfJitHarden int `json:"bpf_jit_harden"` + BpfJitKallsyms int `json:"bpf_jit_kallsyms"` + BpfJitLimit int `json:"bpf_jit_limit"` + ConfigBpf KernelParam `json:"CONFIG_BPF"` + ConfigBpfSyscall KernelParam `json:"CONFIG_BPF_SYSCALL"` + ConfigHaveEbpfJit KernelParam `json:"CONFIG_HAVE_EBPF_JIT"` + ConfigBpfJit KernelParam `json:"CONFIG_BPF_JIT"` + ConfigBpfJitAlwaysOn KernelParam `json:"CONFIG_BPF_JIT_ALWAYS_ON"` + ConfigCgroups KernelParam `json:"CONFIG_CGROUPS"` + ConfigCgroupBpf KernelParam `json:"CONFIG_CGROUP_BPF"` + ConfigCgroupNetClassID KernelParam `json:"CONFIG_CGROUP_NET_CLASSID"` + ConfigSockCgroupData KernelParam `json:"CONFIG_SOCK_CGROUP_DATA"` + ConfigBpfEvents KernelParam `json:"CONFIG_BPF_EVENTS"` + ConfigKprobeEvents KernelParam `json:"CONFIG_KPROBE_EVENTS"` + ConfigUprobeEvents KernelParam `json:"CONFIG_UPROBE_EVENTS"` + ConfigTracing KernelParam `json:"CONFIG_TRACING"` + ConfigFtraceSyscalls KernelParam `json:"CONFIG_FTRACE_SYSCALLS"` + ConfigFunctionErrorInjection KernelParam `json:"CONFIG_FUNCTION_ERROR_INJECTION"` + ConfigBpfKprobeOverride KernelParam `json:"CONFIG_BPF_KPROBE_OVERRIDE"` + ConfigNet KernelParam `json:"CONFIG_NET"` + ConfigXdpSockets KernelParam `json:"CONFIG_XDP_SOCKETS"` + ConfigLwtunnelBpf KernelParam `json:"CONFIG_LWTUNNEL_BPF"` + ConfigNetActBpf KernelParam `json:"CONFIG_NET_ACT_BPF"` + ConfigNetClsBpf KernelParam `json:"CONFIG_NET_CLS_BPF"` + ConfigNetClsAct KernelParam `json:"CONFIG_NET_CLS_ACT"` + ConfigNetSchIngress KernelParam `json:"CONFIG_NET_SCH_INGRESS"` + ConfigXfrm KernelParam `json:"CONFIG_XFRM"` + ConfigIPRouteClassID KernelParam `json:"CONFIG_IP_ROUTE_CLASSID"` + ConfigIPv6Seg6Bpf KernelParam `json:"CONFIG_IPV6_SEG6_BPF"` + ConfigBpfLircMode2 KernelParam `json:"CONFIG_BPF_LIRC_MODE2"` + ConfigBpfStreamParser KernelParam `json:"CONFIG_BPF_STREAM_PARSER"` + 
ConfigNetfilterXtMatchBpf KernelParam `json:"CONFIG_NETFILTER_XT_MATCH_BPF"` + ConfigBpfilter KernelParam `json:"CONFIG_BPFILTER"` + ConfigBpfilterUmh KernelParam `json:"CONFIG_BPFILTER_UMH"` + ConfigTestBpf KernelParam `json:"CONFIG_TEST_BPF"` + ConfigKernelHz KernelParam `json:"CONFIG_HZ"` +} + +// MapTypes contains bools indicating which types of BPF maps the currently +// running kernel supports. +type MapTypes struct { + HaveHashMapType bool `json:"have_hash_map_type"` + HaveArrayMapType bool `json:"have_array_map_type"` + HaveProgArrayMapType bool `json:"have_prog_array_map_type"` + HavePerfEventArrayMapType bool `json:"have_perf_event_array_map_type"` + HavePercpuHashMapType bool `json:"have_percpu_hash_map_type"` + HavePercpuArrayMapType bool `json:"have_percpu_array_map_type"` + HaveStackTraceMapType bool `json:"have_stack_trace_map_type"` + HaveCgroupArrayMapType bool `json:"have_cgroup_array_map_type"` + HaveLruHashMapType bool `json:"have_lru_hash_map_type"` + HaveLruPercpuHashMapType bool `json:"have_lru_percpu_hash_map_type"` + HaveLpmTrieMapType bool `json:"have_lpm_trie_map_type"` + HaveArrayOfMapsMapType bool `json:"have_array_of_maps_map_type"` + HaveHashOfMapsMapType bool `json:"have_hash_of_maps_map_type"` + HaveDevmapMapType bool `json:"have_devmap_map_type"` + HaveSockmapMapType bool `json:"have_sockmap_map_type"` + HaveCpumapMapType bool `json:"have_cpumap_map_type"` + HaveXskmapMapType bool `json:"have_xskmap_map_type"` + HaveSockhashMapType bool `json:"have_sockhash_map_type"` + HaveCgroupStorageMapType bool `json:"have_cgroup_storage_map_type"` + HaveReuseportSockarrayMapType bool `json:"have_reuseport_sockarray_map_type"` + HavePercpuCgroupStorageMapType bool `json:"have_percpu_cgroup_storage_map_type"` + HaveQueueMapType bool `json:"have_queue_map_type"` + HaveStackMapType bool `json:"have_stack_map_type"` +} + +// Features contains BPF feature checks returned by bpftool. 
+type Features struct { + SystemConfig `json:"system_config"` + MapTypes `json:"map_types"` +} + +// ProbeManager is a manager of BPF feature checks. +type ProbeManager struct { + features Features +} + +// NewProbeManager returns a new instance of ProbeManager - a manager of BPF +// feature checks. +func NewProbeManager() *ProbeManager { + newProbeManager := func() { + probeManager = &ProbeManager{} + probeManager.features = probeManager.Probe() + } + once.Do(newProbeManager) + return probeManager +} + +// Probe probes the underlying kernel for features. +func (*ProbeManager) Probe() Features { + var features Features + out, err := exec.WithTimeout( + defaults.ExecTimeout, + "bpftool", "-j", "feature", "probe", + ).CombinedOutput(log, true) + if err != nil { + log.WithError(err).Fatal("could not run bpftool") + } + if err := json.Unmarshal(out, &features); err != nil { + log.WithError(err).Fatal("could not parse bpftool output") + } + return features +} + +// SystemConfigProbes performs a check of kernel configuration parameters. It +// returns an error when parameters required by Cilium are not enabled. It logs +// warnings when optional parameters are not enabled. 
+// +// When kernel config file is not found, bpftool can't probe kernel configuration +// parameter real setting, so only return error log when kernel config file exists +// and kernel configuration parameter setting is disabled +func (p *ProbeManager) SystemConfigProbes() error { + var notFound bool + if !p.KernelConfigAvailable() { + notFound = true + log.Info("Kernel config file not found: if the agent fails to start, check the system requirements at https://docs.cilium.io/en/stable/operations/system_requirements") + } + requiredParams := p.GetRequiredConfig() + for param, kernelOption := range requiredParams { + if !kernelOption.Enabled && !notFound { + module := "" + if kernelOption.CanBeModule { + module = " or module" + } + return fmt.Errorf("%s kernel parameter%s is required (needed for: %s)", param, module, kernelOption.Description) + } + } + optionalParams := p.GetOptionalConfig() + for param, kernelOption := range optionalParams { + if !kernelOption.Enabled && !notFound { + module := "" + if kernelOption.CanBeModule { + module = " or module" + } + log.Warningf("%s optional kernel parameter%s is not in kernel (needed for: %s)", param, module, kernelOption.Description) + } + } + return nil +} + +// GetRequiredConfig performs a check of mandatory kernel configuration options. It +// returns a map indicating which required kernel parameters are enabled - and which are not. +// GetRequiredConfig is being used by CLI "cilium kernel-check". 
+func (p *ProbeManager) GetRequiredConfig() map[KernelParam]kernelOption { + config := p.features.SystemConfig + coreInfraDescription := "Essential eBPF infrastructure" + kernelParams := make(map[KernelParam]kernelOption) + + kernelParams["CONFIG_BPF"] = kernelOption{ + Enabled: config.ConfigBpf.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_BPF_SYSCALL"] = kernelOption{ + Enabled: config.ConfigBpfSyscall.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_NET_SCH_INGRESS"] = kernelOption{ + Enabled: config.ConfigNetSchIngress.Enabled() || config.ConfigNetSchIngress.Module(), + Description: coreInfraDescription, + CanBeModule: true, + } + kernelParams["CONFIG_NET_CLS_BPF"] = kernelOption{ + Enabled: config.ConfigNetClsBpf.Enabled() || config.ConfigNetClsBpf.Module(), + Description: coreInfraDescription, + CanBeModule: true, + } + kernelParams["CONFIG_NET_CLS_ACT"] = kernelOption{ + Enabled: config.ConfigNetClsAct.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_BPF_JIT"] = kernelOption{ + Enabled: config.ConfigBpfJit.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_HAVE_EBPF_JIT"] = kernelOption{ + Enabled: config.ConfigHaveEbpfJit.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + + return kernelParams +} + +// GetOptionalConfig performs a check of *optional* kernel configuration options. It +// returns a map indicating which optional/non-mandatory kernel parameters are enabled. +// GetOptionalConfig is being used by CLI "cilium kernel-check". 
+func (p *ProbeManager) GetOptionalConfig() map[KernelParam]kernelOption { + config := p.features.SystemConfig + kernelParams := make(map[KernelParam]kernelOption) + + kernelParams["CONFIG_CGROUP_BPF"] = kernelOption{ + Enabled: config.ConfigCgroupBpf.Enabled(), + Description: "Host Reachable Services and Sockmap optimization", + CanBeModule: false, + } + kernelParams["CONFIG_LWTUNNEL_BPF"] = kernelOption{ + Enabled: config.ConfigLwtunnelBpf.Enabled(), + Description: "Lightweight Tunnel hook for IP-in-IP encapsulation", + CanBeModule: false, + } + kernelParams["CONFIG_BPF_EVENTS"] = kernelOption{ + Enabled: config.ConfigBpfEvents.Enabled(), + Description: "Visibility and congestion management with datapath", + CanBeModule: false, + } + + return kernelParams +} + +// KernelConfigAvailable checks if the Kernel Config is available on the +// system or not. +func (p *ProbeManager) KernelConfigAvailable() bool { + // Check Kernel Config is available or not. + // We are replicating BPFTools logic here to check if kernel config is available + // https://elixir.bootlin.com/linux/v5.7/source/tools/bpf/bpftool/feature.c#L390 + info := unix.Utsname{} + err := unix.Uname(&info) + if err != nil { + return false + } + release := strings.TrimSpace(string(bytes.Trim(info.Release[:], "\x00"))) + + // Any error checking these files will return Kernel config not found error + if _, err := os.Stat(fmt.Sprintf("/boot/config-%s", release)); err != nil { + if _, err = os.Stat("/proc/config.gz"); err != nil { + return false + } + } + + return true +} + +// HaveProgramHelper is a wrapper around features.HaveProgramHelper() to +// check if a certain BPF program/helper copmbination is supported by the kernel. +// On unexpected probe results this function will terminate with log.Fatal(). 
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + err := features.HaveProgramHelper(pt, helper) + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).WithField("programtype", pt).WithField("helper", helper).Fatal("failed to probe helper") + } + return nil +} + +// HaveLargeInstructionLimit is a wrapper around features.HaveLargeInstructions() +// to check if the kernel supports the 1 Million instruction limit. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveLargeInstructionLimit() error { + err := features.HaveLargeInstructions() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe large instruction limit") + } + return nil +} + +// HaveBoundedLoops is a wrapper around features.HaveBoundedLoops() +// to check if the kernel supports bounded loops in BPF programs. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveBoundedLoops() error { + err := features.HaveBoundedLoops() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe bounded loops") + } + return nil +} + +// HaveFibIfindex checks if kernel has d1c362e1dd68 ("bpf: Always return target +// ifindex in bpf_fib_lookup") which is 5.10+. This got merged in the same kernel +// as the new redirect helpers. +func HaveFibIfindex() error { + return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnRedirectPeer) +} + +// HaveV2ISA is a wrapper around features.HaveV2ISA() to check if the kernel +// supports the V2 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). 
+func HaveV2ISA() error { + err := features.HaveV2ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V2 ISA") + } + return nil +} + +// HaveV3ISA is a wrapper around features.HaveV3ISA() to check if the kernel +// supports the V3 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveV3ISA() error { + err := features.HaveV3ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V3 ISA") + } + return nil +} + +// HaveOuterSourceIPSupport tests whether the kernel support setting the outer +// source IP address via the bpf_skb_set_tunnel_key BPF helper. We can't rely +// on the verifier to reject a program using the new support because the +// verifier just accepts any argument size for that helper; non-supported +// fields will simply not be used. Instead, we set the outer source IP and +// retrieve it with bpf_skb_get_tunnel_key right after. If the retrieved value +// equals the value set, we have a confirmation the kernel supports it. 
+func HaveOuterSourceIPSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for outer source IP support") + } + }() + + progSpec := &ebpf.ProgramSpec{ + Name: "set_tunnel_key_probe", + Type: ebpf.SchedACT, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Reg(asm.R8, asm.R1), + + asm.Mov.Imm(asm.R2, 0), + asm.StoreMem(asm.RFP, -8, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -16, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -24, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -32, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -40, asm.R2, asm.DWord), + asm.Mov.Imm(asm.R2, 42), + asm.StoreMem(asm.RFP, -44, asm.R2, asm.Word), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), // sizeof(struct bpf_tunnel_key) when setting the outer source IP is supported. + asm.Mov.Imm(asm.R4, 0), + asm.FnSkbSetTunnelKey.Call(), + + asm.Mov.Reg(asm.R1, asm.R8), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), + asm.Mov.Imm(asm.R4, 0), + asm.FnSkbGetTunnelKey.Call(), + + asm.LoadMem(asm.R0, asm.RFP, -44, asm.Word), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + pkt := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ret, _, err := prog.Test(pkt) + if err != nil { + return err + } + if ret != 42 { + return ebpf.ErrNotSupported + } + return nil +} + +// HaveSKBAdjustRoomL2RoomMACSupport tests whether the kernel supports the `bpf_skb_adjust_room` helper +// with the `BPF_ADJ_ROOM_MAC` mode. To do so, we create a program that requests the passed in SKB +// to be expanded by 20 bytes. The helper checks the `mode` argument and will return -ENOSUPP if +// the mode is unknown. Otherwise it should resize the SKB by 20 bytes and return 0. 
+func HaveSKBAdjustRoomL2RoomMACSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for bpf_skb_adjust_room L2 room MAC support") + } + }() + + progSpec := &ebpf.ProgramSpec{ + Name: "adjust_mac_room", + Type: ebpf.SchedCLS, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Imm(asm.R2, 20), // len_diff + asm.Mov.Imm(asm.R3, 1), // mode: BPF_ADJ_ROOM_MAC + asm.Mov.Imm(asm.R4, 0), // flags: 0 + asm.FnSkbAdjustRoom.Call(), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + // This is a Eth + IPv4 + UDP + data packet. The helper relies on a valid packet being passed in + // since it wants to know offsets of the different layers. + buf := gopacket.NewSerializeBuffer() + err = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, + &layers.Ethernet{ + DstMAC: net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + SrcMAC: net.HardwareAddr{0x0e, 0xf5, 0x16, 0x3d, 0x6b, 0xab}, + EthernetType: layers.EthernetTypeIPv4, + }, + &layers.IPv4{ + Version: 4, + IHL: 5, + Length: 49, + Id: 0xCECB, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: net.IPv4(0xc0, 0xa8, 0xb2, 0x56), + DstIP: net.IPv4(0xc0, 0xa8, 0xb2, 0xff), + }, + &layers.UDP{ + SrcPort: 23939, + DstPort: 32412, + }, + gopacket.Payload("M-SEARCH * HTTP/1.1\x0d\x0a"), + ) + if err != nil { + return fmt.Errorf("craft packet: %w", err) + } + + ret, _, err := prog.Test(buf.Bytes()) + if err != nil { + return err + } + if ret != 0 { + return ebpf.ErrNotSupported + } + return nil +} + +// HaveIPv6Support tests whether kernel can open an IPv6 socket. This will +// also implicitly auto-load IPv6 kernel module if available and not yet +// loaded. 
+func HaveIPv6Support() error { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0) + if errors.Is(err, unix.EAFNOSUPPORT) || errors.Is(err, unix.EPROTONOSUPPORT) { + return ErrNotSupported + } + unix.Close(fd) + return nil +} + +// CreateHeaderFiles creates C header files with macros indicating which BPF +// features are available in the kernel. +func CreateHeaderFiles(headerDir string, probes *FeatureProbes) error { + common, err := os.Create(filepath.Join(headerDir, "features.h")) + if err != nil { + return fmt.Errorf("could not create common features header file: %w", err) + } + defer common.Close() + if err := writeCommonHeader(common, probes); err != nil { + return fmt.Errorf("could not write common features header file: %w", err) + } + + skb, err := os.Create(filepath.Join(headerDir, "features_skb.h")) + if err != nil { + return fmt.Errorf("could not create skb related features header file: %w", err) + } + defer skb.Close() + if err := writeSkbHeader(skb, probes); err != nil { + return fmt.Errorf("could not write skb related features header file: %w", err) + } + + xdp, err := os.Create(filepath.Join(headerDir, "features_xdp.h")) + if err != nil { + return fmt.Errorf("could not create xdp related features header file: %w", err) + } + defer xdp.Close() + if err := writeXdpHeader(xdp, probes); err != nil { + return fmt.Errorf("could not write xdp related features header file: %w", err) + } + + return nil +} + +// ExecuteHeaderProbes probes the kernel for a specific set of BPF features +// which are currently used to generate various feature macros for the datapath. +// The probe results returned in FeatureProbes are then used in the respective +// function that writes the actual C macro definitions. +// Further needed probes should be added here, while new macro strings need to +// be added in the correct `write*Header()` function. 
+func ExecuteHeaderProbes() *FeatureProbes { + probes := FeatureProbes{ + ProgramHelpers: make(map[ProgramHelper]bool), + Misc: miscFeatures{}, + } + + progHelpers := []ProgramHelper{ + // common probes + {ebpf.CGroupSock, asm.FnGetNetnsCookie}, + {ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}, + {ebpf.CGroupSockAddr, asm.FnGetSocketCookie}, + {ebpf.CGroupSock, asm.FnJiffies64}, + {ebpf.CGroupSockAddr, asm.FnJiffies64}, + {ebpf.SchedCLS, asm.FnJiffies64}, + {ebpf.XDP, asm.FnJiffies64}, + {ebpf.CGroupSockAddr, asm.FnSkLookupTcp}, + {ebpf.CGroupSockAddr, asm.FnSkLookupUdp}, + {ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}, + {ebpf.CGroupSock, asm.FnSetRetval}, + {ebpf.SchedCLS, asm.FnRedirectNeigh}, + {ebpf.SchedCLS, asm.FnRedirectPeer}, + + // skb related probes + {ebpf.SchedCLS, asm.FnSkbChangeTail}, + {ebpf.SchedCLS, asm.FnCsumLevel}, + + // xdp related probes + {ebpf.XDP, asm.FnXdpLoadBytes}, + {ebpf.XDP, asm.FnXdpStoreBytes}, + } + for _, ph := range progHelpers { + probes.ProgramHelpers[ph] = (HaveProgramHelper(ph.Program, ph.Helper) == nil) + } + + probes.Misc.HaveLargeInsnLimit = (HaveLargeInstructionLimit() == nil) + probes.Misc.HaveFibIfindex = (HaveFibIfindex() == nil) + + return &probes +} + +// writeCommonHeader defines macross for bpf/include/bpf/features.h +func writeCommonHeader(writer io.Writer, probes *FeatureProbes) error { + features := map[string]bool{ + "HAVE_NETNS_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnGetNetnsCookie}] && + probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}], + "HAVE_SOCKET_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetSocketCookie}], + "HAVE_JIFFIES": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnJiffies64}], + 
"HAVE_SOCKET_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupTcp}] && + probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupUdp}], + "HAVE_CGROUP_ID": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}], + "HAVE_LARGE_INSN_LIMIT": probes.Misc.HaveLargeInsnLimit, + "HAVE_SET_RETVAL": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnSetRetval}], + "HAVE_FIB_NEIGH": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnRedirectNeigh}], + "HAVE_FIB_IFINDEX": probes.Misc.HaveFibIfindex, + } + + return writeFeatureHeader(writer, features, true) +} + +// writeSkbHeader defines macros for bpf/include/bpf/features_skb.h +func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error { + featuresSkb := map[string]bool{ + "HAVE_CHANGE_TAIL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnSkbChangeTail}], + "HAVE_CSUM_LEVEL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnCsumLevel}], + } + + return writeFeatureHeader(writer, featuresSkb, false) +} + +// writeXdpHeader defines macros for bpf/include/bpf/features_xdp.h +func writeXdpHeader(writer io.Writer, probes *FeatureProbes) error { + featuresXdp := map[string]bool{ + "HAVE_XDP_LOAD_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpLoadBytes}], + "HAVE_XDP_STORE_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpStoreBytes}], + } + + return writeFeatureHeader(writer, featuresXdp, false) +} + +func writeFeatureHeader(writer io.Writer, features map[string]bool, common bool) error { + input := struct { + Common bool + Features map[string]bool + }{ + Common: common, + Features: features, + } + + if err := tpl.Execute(writer, input); err != nil { + return fmt.Errorf("could not write template: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go 
new file mode 100644 index 0000000000..846e9c28e0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import "github.com/vishvananda/netlink" + +// Family type definitions +const ( + NTF_EXT_LEARNED = netlink.NTF_EXT_LEARNED + NTF_EXT_MANAGED = netlink.NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go new file mode 100644 index 0000000000..f92efd4990 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package probes + +// Dummy values on non-linux platform +const ( + NTF_EXT_LEARNED = iota + NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go index 1258c1af89..2dd7214a14 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -86,6 +86,18 @@ const ( // HubbleRecorderSinkQueueSize is the queue size for each recorder sink HubbleRecorderSinkQueueSize = 1024 + // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows + HubbleRedactEnabled = false + + // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows + HubbleRedactHttpURLQuery = false + + // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo = true + + // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows + HubbleRedactKafkaApiKey = false + // MonitorSockPath1_2 is the path to the UNIX domain socket used to // distribute BPF and agent events to 
listeners. // This is the 1.2 protocol version. @@ -158,6 +170,13 @@ const ( // endpoints that are larger than 512 Bytes or the EDNS0 option, if present. ToFQDNsEnableDNSCompression = true + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. + DNSProxyEnableTransparentMode = false + + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout = 10 + // IdentityChangeGracePeriod is the default value for // option.IdentityChangeGracePeriod IdentityChangeGracePeriod = 5 * time.Second @@ -169,6 +188,10 @@ const ( // ExecTimeout is a timeout for executing commands. ExecTimeout = 300 * time.Second + // MaxInternalTimerDelay does not enforce a maximum on timer values in + // the agent by default. + MaxInternalTimerDelay = 0 * time.Second + // StatusCollectorInterval is the interval between a probe invocations StatusCollectorInterval = 5 * time.Second @@ -207,9 +230,6 @@ const ( // EnableHostLegacyRouting is the default value for using the old routing path via stack. EnableHostLegacyRouting = false - // K8sEnableEndpointSlice is the default value for k8s EndpointSlice feature. - K8sEnableEndpointSlice = true - // PreAllocateMaps is the default value for BPF map preallocation PreAllocateMaps = true @@ -224,6 +244,10 @@ const ( // be necessary on key rotations. EnableIPsecKeyWatcher = true + // Enable caching for XfrmState for IPSec. Significantly reduces CPU usage + // in large clusters. + EnableIPSecXfrmStateCaching = false + // EncryptNode enables encrypting traffic from host networking applications // which are not part of Cilium manged pods. 
EncryptNode = false @@ -274,6 +298,10 @@ const ( // EnableHealthCheckNodePort EnableHealthCheckNodePort = true + // EnableHealthCheckLoadBalancerIP is the default value for + // EnableHealthCheckLoadBalancerIP + EnableHealthCheckLoadBalancerIP = false + // AlignCheckerName is the BPF object name for the alignchecker. AlignCheckerName = "bpf_alignchecker.o" @@ -321,11 +349,7 @@ const ( // connection tracking garbage collection ConntrackGCStartingInterval = 5 * time.Minute - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. - K8sEventHandover = false + LegacyTurnOffK8sEventHandover = false // LoopbackIPv4 is the default address for service loopback LoopbackIPv4 = "169.254.42.1" @@ -370,9 +394,8 @@ const ( // CiliumNode.Spec.IPAM.PreAllocate if no value is set IPAMPreAllocation = 8 - // IPAMMultiPoolPreAllocation is the default value for multi-pool IPAM - // pre-allocations - IPAMMultiPoolPreAllocation = "default=8" + // IPAMDefaultIPPool is the default value for the multi-pool default pool name. + IPAMDefaultIPPool = "default" // ENIFirstInterfaceIndex is the default value for // CiliumNode.Spec.ENI.FirstInterfaceIndex if no value is set. 
@@ -414,16 +437,6 @@ const ( // IPAMAPIQPSLimit is the default QPS limit when rate limiting access to external APIs IPAMAPIQPSLimit = 4.0 - // IPAMPodCIDRAllocationThreshold is the default value for - // CiliumNode.Spec.IPAM.PodCIDRAllocationThreshold if no value is set - // Defaults to 8, which is similar to IPAMPreAllocation - IPAMPodCIDRAllocationThreshold = 8 - - // IPAMPodCIDRReleaseThreshold is the default value for - // CiliumNode.Spec.IPAM.PodCIDRReleaseThreshold if no value is set - // Defaults to 16, which is 2x the allocation threshold to avoid flapping - IPAMPodCIDRReleaseThreshold = 16 - // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node AutoCreateCiliumNodeResource = true @@ -502,11 +515,9 @@ const ( // InstallNoConntrackRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic. InstallNoConntrackIptRules = false - // WireguardSubnetV4 is a default wireguard tunnel subnet - WireguardSubnetV4 = "172.16.43.0/24" - - // WireguardSubnetV6 is a default wireguard tunnel subnet - WireguardSubnetV6 = "fdc9:281f:04d7:9ee9::1/64" + // ContainerIPLocalReservedPortsAuto instructs the Cilium CNI plugin to reserve + // an auto-generated list of ports in the container network namespace + ContainerIPLocalReservedPortsAuto = "auto" // ExternalClusterIP enables cluster external access to ClusterIP services. // Defaults to false to retain prior behaviour of not routing external packets to ClusterIPs. @@ -521,13 +532,16 @@ const ( // TunnelProtocol is the default tunneling protocol TunnelProtocol = "vxlan" + // ServiceNoBackendResponse is the default response for services without backends + ServiceNoBackendResponse = "reject" + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. 
UseCiliumInternalIPForIPsec = false // TunnelPortVXLAN is the default VXLAN port - TunnelPortVXLAN = 8472 + TunnelPortVXLAN uint16 = 8472 // TunnelPortGeneve is the default Geneve port - TunnelPortGeneve = 6081 + TunnelPortGeneve uint16 = 6081 // ARPBaseReachableTime resembles the kernel's NEIGH_VAR_BASE_REACHABLE_TIME which defaults to 30 seconds. ARPBaseReachableTime = 30 * time.Second @@ -541,6 +555,16 @@ const ( // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = true + + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. + // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters = 255 + + // EnableEnvoyConfig is the default value for option.EnableEnvoyConfig + EnableEnvoyConfig = false ) var ( @@ -573,4 +597,6 @@ var ( "cilium_lb6_source_range": "enabled,128,0", "cilium_lb6_affinity_match": "enabled,128,0", } + + PolicyCIDRMatchMode = []string{} ) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/node.go b/vendor/github.com/cilium/cilium/pkg/defaults/node.go index facc7dec83..4cfeef0027 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/node.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/node.go @@ -23,6 +23,18 @@ const ( // SecondHostDevice is the name of the second interface of the host veth pair. SecondHostDevice = "cilium_net" + // IPIPv4Device is a device of type 'ipip', created by the agent. + IPIPv4Device = "cilium_ipip4" + + // IPIPv6Device is a device of type 'ip6tnl', created by the agent. + IPIPv6Device = "cilium_ipip6" + + // GeneveDevice is a device of type 'geneve', created by the agent. + GeneveDevice = "cilium_geneve" + + // VxlanDevice is a device of type 'vxlan', created by the agent. 
+ VxlanDevice = "cilium_vxlan" + // CiliumK8sAnnotationPrefix is the prefix key for the annotations used in kubernetes. CiliumK8sAnnotationPrefix = "cilium.io/" @@ -52,4 +64,13 @@ var ( // IPv4DefaultRoute is the default IPv4 route. IPv4DefaultRoute = net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)} + + // ExcludedDevicePrefixes are prefixes that we don't consider during automatic device detection. + ExcludedDevicePrefixes = []string{ + "cilium_", + "lo", + "lxc", + "cni", + "docker", + } ) diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go index 73a2159bde..96b8d7b102 100644 --- a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go +++ b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go @@ -33,8 +33,16 @@ const ( // container ID. The container ID is specific to the container runtime // in use. Only the primary container that defines the networking scope // can be used to address an endpoint. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. Use CNIAttachmentIdPrefix instead ContainerIdPrefix PrefixType = "container-id" + // CNIAttachmentIdPrefix is used to address an endpoint via its primary + // container ID and container interface passed to the CNI plugin. + // This attachment ID uniquely identifies a CNI ADD and CNI DEL invocation pair. + CNIAttachmentIdPrefix PrefixType = "cni-attachment-id" + // DockerEndpointPrefix is used to address an endpoint via the Docker // endpoint ID. This method is only possible if the endpoint was // created via the cilium-docker plugin and the container is backed by @@ -45,11 +53,22 @@ const ( // container's name. This addressing mechanism depends on the container // runtime. Only the primary container that the networking scope can be // used to address an endpoint. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. 
Use CNIAttachmentIdPrefix instead ContainerNamePrefix PrefixType = "container-name" + // CEPNamePrefix is used to address an endpoint via its Kubernetes + // CiliumEndpoint resource name. This addressing only works if the endpoint + // is represented as a Kubernetes CiliumEndpoint resource. + CEPNamePrefix PrefixType = "cep-name" + // PodNamePrefix is used to address an endpoint via the Kubernetes pod // name. This addressing only works if the endpoint represents as // Kubernetes pod. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. May not be unique. Use CEPNamePrefix instead. PodNamePrefix PrefixType = "pod-name" // IPv4Prefix is used to address an endpoint via the endpoint's IPv4 @@ -62,7 +81,7 @@ const ( // NewCiliumID returns a new endpoint identifier of type CiliumLocalIdPrefix func NewCiliumID(id int64) string { - return fmt.Sprintf("%s:%d", CiliumLocalIdPrefix, id) + return NewID(CiliumLocalIdPrefix, strconv.FormatInt(id, 10)) } // NewID returns a new endpoint identifier @@ -82,9 +101,19 @@ func NewIPPrefixID(ip netip.Addr) string { return "" } +// NewCNIAttachmentID returns an identifier based on the CNI attachment ID. If +// the containerIfName is empty, only the containerID will be used. +func NewCNIAttachmentID(containerID, containerIfName string) string { + id := containerID + if containerIfName != "" { + id = containerID + ":" + containerIfName + } + return NewID(CNIAttachmentIdPrefix, id) +} + // splitID splits ID into prefix and id. No validation is performed on prefix. 
func splitID(id string) (PrefixType, string) { - if idx := strings.Index(id, ":"); idx > -1 { + if idx := strings.IndexByte(id, ':'); idx > -1 { return PrefixType(id[:idx]), id[idx+1:] } @@ -100,7 +129,7 @@ func ParseCiliumID(id string) (int64, error) { } n, err := strconv.ParseInt(id, 0, 64) if err != nil || n < 0 { - return 0, fmt.Errorf("invalid numeric cilium id: %s", err) + return 0, fmt.Errorf("invalid numeric cilium id: %w", err) } if n > MaxEndpointID { return 0, fmt.Errorf("endpoint id too large: %d", n) @@ -113,7 +142,16 @@ func ParseCiliumID(id string) (int64, error) { func Parse(id string) (PrefixType, string, error) { prefix, id := splitID(id) switch prefix { - case CiliumLocalIdPrefix, CiliumGlobalIdPrefix, ContainerIdPrefix, DockerEndpointPrefix, ContainerNamePrefix, PodNamePrefix, IPv4Prefix, IPv6Prefix: + case CiliumLocalIdPrefix, + CiliumGlobalIdPrefix, + CNIAttachmentIdPrefix, + ContainerIdPrefix, + DockerEndpointPrefix, + ContainerNamePrefix, + CEPNamePrefix, + PodNamePrefix, + IPv4Prefix, + IPv6Prefix: return prefix, id, nil } diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/client.go b/vendor/github.com/cilium/cilium/pkg/health/client/client.go index d61a3783ef..dd943633b2 100644 --- a/vendor/github.com/cilium/cilium/pkg/health/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/health/client/client.go @@ -317,8 +317,15 @@ func formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, } if succinct { if printAll || !nodeIsHealthy(node) { + ips := []string{getPrimaryAddressIP(node)} + for _, addr := range GetHostSecondaryAddresses(node) { + if addr == nil { + continue + } + ips = append(ips, addr.IP) + } fmt.Fprintf(w, " %s%s\t%s\t%s\t%s\n", node.Name, - localStr, getPrimaryAddressIP(node), + localStr, strings.Join(ips, ","), SummarizePathConnectivityStatusType(GetAllHostAddresses(node)).String(), SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)).String()) } diff --git 
a/vendor/github.com/cilium/cilium/pkg/health/client/modules.go b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go new file mode 100644 index 0000000000..470da8ed15 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package client + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/duration" + + "github.com/cilium/cilium/api/v1/client/daemon" + "github.com/cilium/cilium/pkg/hive/cell" +) + +const ( + noPod = "(/)" + rootNode = "agent" + noErr = "" +) + +// ModulesHealth represent hive modules health API. +type ModulesHealth interface { + // GetHealth retrieves agent modules health. + GetHealth(params *daemon.GetHealthParams, opts ...daemon.ClientOption) (*daemon.GetHealthOK, error) +} + +// GetAndFormatModulesHealth retrieves modules health and formats output. +func GetAndFormatModulesHealth(w io.Writer, clt ModulesHealth, verbose bool) { + fmt.Fprintf(w, "Modules Health:") + resp, err := clt.GetHealth(daemon.NewGetHealthParams()) + if err != nil { + fmt.Fprintf(w, "\t%s\n", err) + return + } + + if resp.Payload == nil { + fmt.Fprintf(w, "\tno health payload detected\n") + return + } + if verbose { + r := newRoot(rootNode) + sort.Slice(resp.Payload.Modules, func(i, j int) bool { + return resp.Payload.Modules[i].ModuleID < resp.Payload.Modules[j].ModuleID + }) + for _, m := range resp.Payload.Modules { + if m.Level == string(cell.StatusUnknown) { + continue + } + if err := buildTree(r, m.Message); err != nil { + fmt.Fprintf(w, "Modules Health rendering failed: %s\n", err) + } + } + fmt.Fprintln(w, "\n"+r.String()) + return + } + tally := make(map[cell.Level]int, 4) + for _, m := range resp.Payload.Modules { + tally[cell.Level(m.Level)] += 1 + } + fmt.Fprintf(w, "\t%s(%d) %s(%d) %s(%d)\n", + cell.StatusStopped, + tally[cell.StatusStopped], + cell.StatusDegraded, + 
tally[cell.StatusDegraded], + cell.StatusOK, + tally[cell.StatusOK], + ) +} + +func buildTree(n *node, raw string) error { + var sn cell.StatusNode + if err := json.Unmarshal([]byte(raw), &sn); err != nil { + return err + } + build(n, &sn) + return nil +} + +func ensurePath(n *node, pp []string) *node { + current := n + for _, p := range pp { + if v := current.find(p); v != nil { + current = v + continue + } + current = current.addBranch(strings.Replace(p, noPod, "", 1)) + } + + return current +} + +func build(n *node, sn *cell.StatusNode) { + meta := fmt.Sprintf("[%s] %s", strings.ToUpper(string(sn.LastLevel)), sn.Message) + if sn.Error != "" { + meta += " -- " + sn.Error + } + meta += fmt.Sprintf(" (%s, x%d)", ToAgeHuman(sn.UpdateTimestamp), sn.Count) + pp := strings.Split(sn.Name, ".") + current := ensurePath(n, pp) + if len(sn.SubStatuses) == 0 { + current.meta = meta + return + } + for _, s := range sn.SubStatuses { + build(current, s) + } +} + +// ToAgeHuman converts time to duration. 
+func ToAgeHuman(t time.Time) string { + if t.IsZero() { + return "n/a" + } + + return duration.HumanDuration(time.Since(t)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/tree.go b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go new file mode 100644 index 0000000000..b8dccd5a51 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package client + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + indentSize = 3 + leafMaxWidth = 40 + link decoration = "│" + mid decoration = "├──" + end decoration = "└──" +) + +type decoration string + +func newRoot(r string) *node { + return &node{val: r} +} + +type node struct { + val, meta string + parent *node + nodes []*node +} + +func (n *node) addNode(v string) *node { + return n.addNodeWithMeta(v, "") +} + +func (n *node) addNodeWithMeta(v, m string) *node { + node := node{ + parent: n, + val: v, + meta: m, + } + n.nodes = append(n.nodes, &node) + + return &node +} + +func (n *node) addBranch(v string) *node { + return n.addBranchWithMeta(v, "") +} + +func (n *node) addBranchWithMeta(v, m string) *node { + b := node{ + parent: n, + meta: m, + val: v, + } + n.nodes = append(n.nodes, &b) + + return &b +} + +func (n *node) find(val string) *node { + if n.val == val { + return n + } + for _, node := range n.nodes { + if node.val == val { + return node + } + if v := node.find(val); v != nil { + return v + } + } + + return nil +} + +func (n *node) asBytes() []byte { + var ( + w = new(bytes.Buffer) + levelsEnded []int + max = computeMaxLevel(0, n) + ) + if n.parent == nil { + w.WriteString(n.val) + if n.meta != "" { + w.WriteString(" " + n.meta) + } + fmt.Fprintln(w) + } else { + edge := mid + if len(n.nodes) == 0 { + edge = end + levelsEnded = append(levelsEnded, 0) + } + dumpVals(w, 0, max, levelsEnded, edge, n) + } + if len(n.nodes) > 0 { + dumpNodes(w, 0, max, 
levelsEnded, n.nodes) + } + + return w.Bytes() +} + +func (n *node) String() string { + return string(n.asBytes()) +} + +func (n *node) lastNode() *node { + c := len(n.nodes) + if c == 0 { + return nil + } + + return n.nodes[c-1] +} + +func computeMaxLevel(level int, n *node) int { + if n == nil || len(n.nodes) == 0 { + return level + } + var max int + for _, n := range n.nodes { + m := computeMaxLevel(level+1, n) + if m > max { + max = m + } + } + + return max +} + +func dumpNodes(w io.Writer, level, maxLevel int, levelsEnded []int, nodes []*node) { + for i, node := range nodes { + edge := mid + if i == len(nodes)-1 { + levelsEnded = append(levelsEnded, level) + edge = end + } + dumpVals(w, level, maxLevel, levelsEnded, edge, node) + if len(node.nodes) > 0 { + dumpNodes(w, level+1, maxLevel, levelsEnded, node.nodes) + } + } +} + +func dumpVals(w io.Writer, level, maxLevel int, levelsEnded []int, edge decoration, node *node) { + for i := 0; i < level; i++ { + if isEnded(levelsEnded, i) { + fmt.Fprint(w, strings.Repeat(" ", indentSize+1)) + continue + } + fmt.Fprintf(w, "%s%s", link, strings.Repeat(" ", indentSize)) + } + + val := dumpVal(level, node) + if node.meta != "" { + c := maxLevel - level + if c < 0 { + c = 0 + } + fmt.Fprintf(w, "%s %-"+strconv.Itoa(leafMaxWidth+c*2)+"s%s%s\n", edge, val, strings.Repeat(" ", c), node.meta) + return + } + fmt.Fprintf(w, "%s %s\n", edge, val) +} + +func isEnded(levelsEnded []int, level int) bool { + for _, l := range levelsEnded { + if l == level { + return true + } + } + + return false +} + +func dumpVal(level int, node *node) string { + lines := strings.Split(node.val, "\n") + if len(lines) < 2 { + return node.val + } + + pad := indent(level, node) + for i := 1; i < len(lines); i++ { + lines[i] = fmt.Sprintf("%s%s", pad, lines[i]) + } + + return strings.Join(lines, "\n") +} + +func indent(level int, node *node) string { + links := make([]string, level+1) + for node.parent != nil { + if isLast(node) { + links[level] = 
strings.Repeat(" ", indentSize+1) + } else { + links[level] = fmt.Sprintf("%s%s", link, strings.Repeat(" ", indentSize)) + } + level-- + node = node.parent + } + + return strings.Join(links, "") +} + +func isLast(n *node) bool { + return n == n.parent.lastNode() +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go index 860e03be38..f10e39439e 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/pflag" "go.uber.org/dig" + "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/command" ) @@ -103,9 +104,26 @@ func decoderConfig(target any) *mapstructure.DecoderConfig { Result: target, WeaklyTypedInput: true, DecodeHook: mapstructure.ComposeDecodeHookFunc( + // To unify the splitting of fields of a []string field across the input coming + // from environment, configmap and pflag (command-line), we first split a string + // (env/configmap) by comma, and then for all input methods we split a single + // value []string by whitespace. 
Thus the following all result in the same slice: + // + // --string-slice=foo,bar,baz + // --string-slice="foo bar baz" + // CILIUM_STRING_SLICE="foo,bar,baz" + // CILIUM_STRING_SLICE="foo bar baz" + // /.../configmap/string_slice: "foo bar baz" + // /.../configmap/string_slice: "foo,bar,baz" + // + // If both commas and whitespaces are present the commas take precedence: + // "foo,bar baz" => []string{"foo", "bar baz"} + mapstructure.StringToSliceHookFunc(","), // string->[]string is split by comma + fixupStringSliceHookFunc, // []string of length 1 is split again by whitespace + mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - stringToMapHookFunc(), + stringToCIDRHookFunc, + stringToMapHookFunc, ), ZeroFields: true, // Error out if the config struct has fields that are @@ -143,14 +161,43 @@ func (c *config[Cfg]) Info(cont container) (info Info) { return } -// stringToMapHookFunc returns a DecodeHookFunc that converts string +// stringToMapHookFunc is a DecodeHookFunc that converts string // to map[string]string supporting both json and KV formats. -func stringToMapHookFunc() mapstructure.DecodeHookFunc { - return func(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { - if from != reflect.String || to != reflect.Map { - return data, nil - } +func stringToMapHookFunc(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { + if from != reflect.String || to != reflect.Map { + return data, nil + } + return command.ToStringMapStringE(data.(string)) +} + +// stringToCIDRSliceHookFunc is a DecodeHookFunc that converts string to []*cidr.CIDR. 
+func stringToCIDRHookFunc(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.String { + return data, nil + } + s := data.(string) + if to != reflect.TypeOf((*cidr.CIDR)(nil)) { + return data, nil + } + return cidr.ParseCIDR(s) +} + +// fixupStringSliceHookFunc takes a []string and if it's a single element splits it again +// by whitespace. This unifies the flag parsing behavior with StringSlice +// values coming from environment or configmap where both spaces or commas can be used to split. +func fixupStringSliceHookFunc(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice || to.Kind() != reflect.Slice { + return data, nil + } + if from.Elem().Kind() != reflect.String || to.Elem().Kind() != reflect.String { + return data, nil + } - return command.ToStringMapStringE(data.(string)) + raw := data.([]string) + if len(raw) == 1 { + // Flag was already split by commas (the default behavior), so split it + // now by spaces. + return strings.Fields(raw[0]), nil } + return raw, nil } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go index 98388d8f23..aa4dc93f43 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go @@ -6,13 +6,14 @@ package cell import ( "context" "fmt" + "sort" "sync/atomic" "time" "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/stream" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // Level denotes what kind an update is. @@ -47,6 +48,8 @@ type HealthReporter interface { // Stopped reports that a module has completed, and will no longer report any // health status. + // Implementations should differentiate that a stopped module may also be OK or Degraded. + // Stopping a reporting should only affect future updates. 
Stopped(reason string) // Degraded declares that a module has entered a degraded state. @@ -55,6 +58,25 @@ type HealthReporter interface { Degraded(reason string, err error) } +// Update represents an instantaneous health status update. +type Update interface { + // Level returns the level of the update. + Level() Level + + // String returns a string representation of the update. + String() string + + // JSON returns a JSON representation of the update, this is used by the agent + // health CLI to unmarshal health status into cell.StatusNode. + JSON() ([]byte, error) + + Timestamp() time.Time +} + +type statusNodeReporter interface { + setStatus(Update) +} + // Health provides exported functions for accessing health status data. // As well, provides unexported functions for use during module apply. type Health interface { @@ -64,41 +86,62 @@ type Health interface { // Get returns a copy of a modules status, by module ID. // This includes unknown status for modules that have not reported a status yet. - Get(string) *Status + Get(FullModuleID) (Status, error) + + // Stats returns a map of the number of module statuses reported by level. + Stats() map[Level]uint64 // Stop stops the health provider from processing updates. Stop(context.Context) error + // Subscribe to health status updates. + Subscribe(context.Context, func(Update), func(error)) + // forModule creates a moduleID scoped reporter handle. - forModule(string) HealthReporter + forModule(FullModuleID) statusNodeReporter // processed returns the number of updates processed. processed() uint64 } -// Update is an event that denotes the change of a modules health state. -type Update struct { - Level - ModuleID string - Message string - Err error +type StatusResult struct { + Update + FullModuleID FullModuleID + Stopped bool } // Status is a modules last health state, including the last update. type Status struct { // Update is the last reported update for a module. 
Update + + FullModuleID FullModuleID + // Stopped is true when a module has been completed, thus it contains // its last reporter status. New updates will not be processed. Stopped bool // Final is the stopped message, if the module has been stopped. - Final string + Final Update // LastOK is the time of the last OK status update. LastOK time.Time // LastUpdated is the time of the last status update. LastUpdated time.Time } +func (s *Status) JSON() ([]byte, error) { + if s.Update == nil { + return nil, nil + } + return s.Update.JSON() +} + +func (s *Status) Level() Level { + if s.Update == nil { + return StatusUnknown + } + return s.Update.Level() +} + // String returns a string representation of a Status, implements fmt.Stringer. func (s *Status) String() string { var sinceLast string @@ -107,8 +150,8 @@ func (s *Status) String() string { } else { sinceLast = time.Since(s.LastUpdated).String() + " ago" } - return fmt.Sprintf("Status{ModuleID: %s, Level: %s, Since: %s, Message: %s, Err: %v}", - s.ModuleID, s.Level, sinceLast, s.Message, s.Err) + return fmt.Sprintf("Status{ModuleID: %s, Level: %s, Since: %s, Message: %s}", + s.FullModuleID, s.Level(), sinceLast, s.Update.String()) } // NewHealthProvider starts and returns a health status which processes @@ -116,23 +159,40 @@ func (s *Status) String() string { func NewHealthProvider() Health { p := &healthProvider{ moduleStatuses: make(map[string]Status), + byLevel: make(map[Level]uint64), running: true, } + p.obs, p.emit, p.complete = stream.Multicast[Update]() + return p } +func (p *healthProvider) Subscribe(ctx context.Context, cb func(Update), complete func(error)) { + p.obs.Observe(ctx, cb, complete) +} + func (p *healthProvider) processed() uint64 { return p.numProcessed.Load() } -func (p *healthProvider) process(u Update) { +func (p *healthProvider) updateMetricsLocked(prev Update, curr Level) { + // If an update is processed that transitions the level state of a module + // then update the level counters. 
+ if prev.Level() != curr { + p.byLevel[curr]++ + p.byLevel[prev.Level()]-- + } +} + +func (p *healthProvider) process(id FullModuleID, u Update) { prev := func() Status { p.mu.Lock() defer p.mu.Unlock() t := time.Now() - prev := p.moduleStatuses[u.ModuleID] + prev := p.moduleStatuses[id.String()] + // If the module has been stopped, then ignore updates. if !p.running { return prev } @@ -141,22 +201,25 @@ func (p *healthProvider) process(u Update) { Update: u, LastUpdated: t, } - switch u.Level { + + switch u.Level() { case StatusOK: ns.LastOK = t case StatusStopped: // If Stopped, set that module was stopped and preserve last known status. ns = prev ns.Stopped = true - ns.Final = u.Message + ns.Final = u } - p.moduleStatuses[u.ModuleID] = ns + p.moduleStatuses[id.String()] = ns + p.updateMetricsLocked(prev.Update, u.Level()) log.WithField("status", ns.String()).Debug("Processed new health status") return prev }() p.numProcessed.Add(1) + p.emit(u) if prev.Stopped { - log.Warnf("module %q reported health status after being Stopped", u.ModuleID) + log.Warnf("module %q reported health status after being Stopped", id) } } @@ -166,18 +229,21 @@ func (p *healthProvider) Stop(ctx context.Context) error { p.mu.Lock() defer p.mu.Unlock() p.running = false // following this, no new reporters will send. + p.complete(nil) // complete the observable, no new subscribers will receive further updates. return nil } +var NoStatus = &StatusNode{Message: "No status reported", LastLevel: StatusUnknown} + // forModule returns a module scoped status reporter handle for emitting status updates. // This is used to automatically provide declared modules with a status reported. 
-func (p *healthProvider) forModule(moduleID string) HealthReporter { +func (p *healthProvider) forModule(moduleID FullModuleID) statusNodeReporter { p.mu.Lock() - p.moduleStatuses[moduleID] = Status{Update: Update{ - ModuleID: moduleID, - Level: StatusUnknown, - Message: "No status reported yet"}, + p.moduleStatuses[moduleID.String()] = Status{ + FullModuleID: moduleID, + Update: NoStatus, } + p.byLevel[StatusUnknown]++ p.mu.Unlock() return &reporter{ @@ -191,21 +257,29 @@ func (p *healthProvider) All() []Status { p.mu.RLock() all := maps.Values(p.moduleStatuses) p.mu.RUnlock() - slices.SortFunc(all, func(a, b Status) bool { - return a.ModuleID < b.ModuleID + sort.Slice(all, func(i, j int) bool { + return all[i].FullModuleID.String() < all[j].FullModuleID.String() }) return all } // Get returns the latest status for a module, by module ID. -func (p *healthProvider) Get(moduleID string) *Status { +func (p *healthProvider) Get(moduleID FullModuleID) (Status, error) { p.mu.RLock() defer p.mu.RUnlock() - s, ok := p.moduleStatuses[moduleID] + s, ok := p.moduleStatuses[moduleID.String()] if ok { - return &s + return s, nil } - return nil + return Status{}, fmt.Errorf("module %q not found", moduleID) +} + +func (p *healthProvider) Stats() map[Level]uint64 { + n := make(map[Level]uint64, len(p.byLevel)) + p.mu.Lock() + maps.Copy(n, p.byLevel) + p.mu.Unlock() + return n } type healthProvider struct { @@ -214,27 +288,22 @@ type healthProvider struct { running bool numProcessed atomic.Uint64 + byLevel map[Level]uint64 moduleStatuses map[string]Status + + obs stream.Observable[Update] + emit func(Update) + complete func(error) } // reporter is a handle for emitting status updates. type reporter struct { - moduleID string - process func(Update) + moduleID FullModuleID + process func(FullModuleID, Update) } // Degraded reports a degraded status update, should be used when a module encounters a // a state that is not fully reconciled. 
-func (r *reporter) Degraded(reason string, err error) { - r.process(Update{ModuleID: r.moduleID, Level: StatusDegraded, Message: reason, Err: err}) -} - -// Stopped reports that a module has stopped, further updates will not be processed. -func (r *reporter) Stopped(reason string) { - r.process(Update{ModuleID: r.moduleID, Level: StatusStopped, Message: reason}) -} - -// OK reports that a module is in a healthy state. -func (r *reporter) OK(status string) { - r.process(Update{ModuleID: r.moduleID, Level: StatusOK, Message: status}) +func (r *reporter) setStatus(u Update) { + r.process(r.moduleID, u) } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go index 449c2748a3..31beeea151 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go @@ -4,30 +4,35 @@ package cell import ( + "fmt" + "sort" + "strings" "time" + "go.uber.org/dig" + "github.com/cilium/cilium/pkg/hive/internal" ) type invoker struct { - cont container funcs []namedFunc } type namedFunc struct { name string fn any + info dig.InvokeInfo } type InvokerList interface { AppendInvoke(func() error) } -func (i *invoker) invoke() error { - for _, afn := range i.funcs { +func (inv *invoker) invoke(cont container) error { + for i, afn := range inv.funcs { log.WithField("function", afn.name).Debug("Invoking") t0 := time.Now() - if err := i.cont.Invoke(afn.fn); err != nil { + if err := cont.Invoke(afn.fn, dig.FillInvokeInfo(&inv.funcs[i].info)); err != nil { log.WithError(err).WithField("", afn.name).Error("Invoke failed") return err } @@ -39,7 +44,7 @@ func (i *invoker) invoke() error { func (i *invoker) Apply(c container) error { // Remember the scope in which we need to invoke. - i.cont = c + invoker := func() error { return i.invoke(c) } // Append the invoker to the list of invoke functions. These are invoked // prior to start to build up the objects. 
They are not invoked directly @@ -48,14 +53,23 @@ func (i *invoker) Apply(c container) error { // we don't yet know which command to run, but we still need to register // all the flags. return c.Invoke(func(l InvokerList) { - l.AppendInvoke(i.invoke) + l.AppendInvoke(invoker) }) } func (i *invoker) Info(container) Info { n := NewInfoNode("") for _, namedFunc := range i.funcs { - n.AddLeaf("🛠️ %s: %s", namedFunc.name, internal.PrettyType(namedFunc.fn)) + invNode := NewInfoNode(fmt.Sprintf("🛠️ %s", namedFunc.name)) + invNode.condensed = true + + var ins []string + for _, input := range namedFunc.info.Inputs { + ins = append(ins, internal.TrimName(input.String())) + } + sort.Strings(ins) + invNode.AddLeaf("⇨ %s", strings.Join(ins, ", ")) + n.Add(invNode) } return n } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go similarity index 78% rename from vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go rename to vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go index 412e30fbb5..95be1b15b8 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/lifecycle.go @@ -1,15 +1,14 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Cilium -package hive +package cell import ( "context" + "errors" "fmt" "time" - "go.uber.org/multierr" - "github.com/cilium/cilium/pkg/hive/internal" "github.com/cilium/cilium/pkg/lock" ) @@ -20,6 +19,17 @@ import ( // initialize) must abort any such operation if this context is cancelled. type HookContext context.Context +// HookInterface mirrors the Hook interface from pkg/hive/lifecycle.go. +// Because pkg/hive/cell depends on HookInterface then we need to have a +// copy of it here. +// Hive provides a "cell" version of this HookInterface interface that does +// not depend on pkg/hive/lifecycle.go thus allowing the cell package to define +// lifecycle hooks. 
+type HookInterface interface { + Start(HookContext) error + Stop(HookContext) error +} + // Hook is a pair of start and stop callbacks. Both are optional. // They're paired up to make sure that on failed start all corresponding // stop hooks are executed. @@ -42,25 +52,14 @@ func (h Hook) Stop(ctx HookContext) error { return h.OnStop(ctx) } -type HookInterface interface { - // Start hook is called when the hive is started. - // Returning a non-nil error causes the start to abort and - // the stop hooks for already started cells to be called. - // - // The context is valid only for the duration of the start - // and is used to allow aborting of start hook on timeout. - Start(HookContext) error - - // Stop hook is called when the hive is stopped or start aborted. - // Returning a non-nil error does not abort stopping. The error - // is recorded and rest of the stop hooks are executed. - Stop(HookContext) error -} - // Lifecycle enables cells to register start and stop hooks, either // from a constructor or an invoke function. type Lifecycle interface { Append(HookInterface) + + Start(context.Context) error + Stop(context.Context) error + PrintHooks() } // DefaultLifecycle lifecycle implements a simple lifecycle management that conforms @@ -68,15 +67,20 @@ type Lifecycle interface { // (e.g. operator). 
type DefaultLifecycle struct { mu lock.Mutex - hooks []HookInterface + hooks []augmentedHook numStarted int } +type augmentedHook struct { + HookInterface + moduleID FullModuleID +} + func (lc *DefaultLifecycle) Append(hook HookInterface) { lc.mu.Lock() defer lc.mu.Unlock() - lc.hooks = append(lc.hooks, hook) + lc.hooks = append(lc.hooks, augmentedHook{hook, nil}) } func (lc *DefaultLifecycle) Start(ctx context.Context) error { @@ -122,7 +126,7 @@ func (lc *DefaultLifecycle) Stop(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - var errs []error + var errs error for ; lc.numStarted > 0; lc.numStarted-- { if ctx.Err() != nil { return ctx.Err() @@ -138,13 +142,13 @@ func (lc *DefaultLifecycle) Stop(ctx context.Context) error { t0 := time.Now() if err := hook.Stop(ctx); err != nil { l.WithError(err).Error("Stop hook failed") - errs = append(errs, err) + errs = errors.Join(errs, err) } else { d := time.Since(t0) l.WithField("duration", d).Info("Stop hook executed") } } - return multierr.Combine(errs...) 
+ return errs } func (lc *DefaultLifecycle) PrintHooks() { @@ -153,27 +157,39 @@ func (lc *DefaultLifecycle) PrintHooks() { fmt.Printf("Start hooks:\n\n") for _, hook := range lc.hooks { - fnName, exists := getHookFuncName(hook, true) + fnName, exists := getHookFuncName(hook.HookInterface, true) if !exists { continue } - fmt.Printf(" • %s\n", fnName) + fmt.Printf(" • %s (%s)\n", fnName, hook.moduleID) } fmt.Printf("\nStop hooks:\n\n") for i := len(lc.hooks) - 1; i >= 0; i-- { hook := lc.hooks[i] - fnName, exists := getHookFuncName(hook, false) + fnName, exists := getHookFuncName(hook.HookInterface, false) if !exists { continue } - fmt.Printf(" • %s\n", fnName) + fmt.Printf(" • %s (%s)\n", fnName, hook.moduleID) } } +type augmentedLifecycle struct { + *DefaultLifecycle + moduleID FullModuleID +} + +func (lc augmentedLifecycle) Append(hook HookInterface) { + lc.mu.Lock() + defer lc.mu.Unlock() + + lc.hooks = append(lc.hooks, augmentedHook{hook, lc.moduleID}) +} + func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool) { // Ok, we need to get a bit fancy here as runtime.FuncForPC does - // not return what we want: we get "hive.Hook.Stop()" when we want + // not return what we want: we get "cell.Hook.Stop()" when we want // "*foo.Stop(). We do know the concrete type, and we do know // the method name, so we check here whether we're dealing with // "Hook" the struct, or an object implementing HookInterface. @@ -183,6 +199,9 @@ func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool) // and the type params would be missing, so instead we'll just use the // type name + method name. 
switch hook := hook.(type) { + case augmentedHook: + name, hasHook = getHookFuncName(hook.HookInterface, start) + return case Hook: if start { if hook.OnStart == nil { diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go index 74fa5f98e1..5f5fcbb284 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go @@ -6,6 +6,8 @@ package cell import ( "fmt" "regexp" + "slices" + "strings" "github.com/sirupsen/logrus" "go.uber.org/dig" @@ -26,6 +28,22 @@ func Module(id, title string, cells ...Cell) Cell { return &module{id, title, cells} } +// ModuleID is the module identifier. Provided in the module's scope. +type ModuleID string + +// FullModuleID is the fully qualified module identifier, e.g. the +// concat of nested module ids, e.g. "agent.controlplane.endpoint-manager". +// Provided in the module's scope. +type FullModuleID []string + +func (f FullModuleID) String() string { + return strings.Join(f, ".") +} + +func (f FullModuleID) append(m ModuleID) FullModuleID { + return append(slices.Clone(f), string(m)) +} + var ( idRegex = regexp.MustCompile(`^[a-z][a-z0-9_\-]{1,30}$`) titleRegex = regexp.MustCompile(`^[a-zA-Z0-9_\- ]{1,80}$`) @@ -56,16 +74,69 @@ func (m *module) logger(log logrus.FieldLogger) logrus.FieldLogger { return log.WithField(logfields.LogSubsys, m.id) } -func (m *module) moduleScopedStatusReporter(p Health) HealthReporter { - return p.forModule(m.id) +func (m *module) moduleID() ModuleID { + return ModuleID(m.id) +} + +func (m *module) fullModuleID(parent FullModuleID) FullModuleID { + return parent.append(m.moduleID()) +} + +type reporterHooks struct { + rootScope *scope +} + +func (r *reporterHooks) Start(ctx HookContext) error { + r.rootScope.start() + return nil +} + +func (r *reporterHooks) Stop(ctx HookContext) error { + flushAndClose(r.rootScope, "Hive shutting down") + return nil +} + +func 
createStructedScope(id FullModuleID, p Health, lc Lifecycle) Scope { + rs := rootScope(id, p.forModule(id)) + lc.Append(&reporterHooks{rootScope: rs}) + return rs +} + +func (m *module) lifecycle(lc Lifecycle, fullID FullModuleID) Lifecycle { + switch lc := lc.(type) { + case *DefaultLifecycle: + return &augmentedLifecycle{ + lc, + fullID, + } + case *augmentedLifecycle: + return &augmentedLifecycle{ + lc.DefaultLifecycle, + fullID, + } + default: + return lc + } } func (m *module) Apply(c container) error { scope := c.Scope(m.id) + // Provide ModuleID and FullModuleID in the module's scope. + if err := scope.Provide(m.moduleID); err != nil { + return err + } + if err := scope.Decorate(m.fullModuleID); err != nil { + return err + } + // Provide module scoped status reporter, used for reporting module level // health status. - if err := scope.Provide(m.moduleScopedStatusReporter, dig.Export(false)); err != nil { + if err := scope.Provide(createStructedScope, dig.Export(false)); err != nil { + return err + } + + if err := scope.Decorate(m.lifecycle); err != nil { return err } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go new file mode 100644 index 0000000000..eca54d24c7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cell + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + "text/tabwriter" + "time" + + "github.com/cilium/cilium/pkg/inctimer" + "github.com/cilium/cilium/pkg/lock" + + "golang.org/x/exp/maps" + "k8s.io/apimachinery/pkg/util/duration" + "k8s.io/apimachinery/pkg/util/sets" +) + +type children sets.Set[string] + +// reporterMinTimeout is the minimum time between status realizations, this +// prevents excessive reporter tree walks which hold the lock. 
+// This also acts as a rate limiter for status updates, if a update is not realized +// because the minimum timeout has not elapsed, it will eventually be realized by +// a periodic wakeup of the same interval. +// +// HealthReporting is not intendeds to capture high frequency events, but rather provide +// a structured view of the health of the system. +var reporterMinTimeout = time.Millisecond * 500 + +// Scope provides a node in the structured health reporter tree that is +// serves only as a parent for other nodes (scopes or reporters), and is +// used to group related reporters together. +type Scope interface { + // Name returns the name of the scope. + Name() string + + // Close removes the scope from the tree, and stops all reporters under this scope. + // Using a reporter that is under this scope after Close has been called will result + // in a noop update and warning log. + // Thus it is preferable for all reporters to be Stopped first, before calling Close. + Close() + + scope() *subReporter +} + +// GetSubScope creates a new reporter scope under the given parent scope. +// This creates a new node in the structured health reporter tree, and any calls +// to GetSubScope or GetHealthReporter from the returned scope will return a child node +// of this reporter tree. +// +// GetSubScope can be chained together to create various levels of sub reporters. +// +// Example: +// +// 1. Init root scope (note: this is provided to modules automatically). +// root := rootScope(hr) +// +// root +// +// 2. Create endpoint-manager subscope, and reporter under that scope (with ok!) +// +// endpointManagerScope := GetSubScope(root, "endpoint-manager") +// GetHealthReporter(endpointManagerScope, "endpoint-000").OK("it works!") +// +// root(OK) +// └── scope(endpoint-manager, OK) +// └── reporter(endpoint-000, OK) +// +// 3. 
Create another reporter under that scope with degraded +// GetHealthReporter(endpointManagerScope, "endpoint-000").Degraded("oh no!") +// +// root(Degraded) +// └── scope(endpoint-manager, Degraded) +// └── reporter(endpoint-000, OK) +// └── reporter(endpoint-000, Degraded) +// +// 4. Close the endpoint-manager scope +// s.Close() +// +// root(OK) // status has been reported, but we no longer have any degraded status +// // default to ok status. +func GetSubScope(parent Scope, name string) Scope { + if parent == nil { + return nil + } + return createSubScope(parent, name) +} + +// GetHealthReporter creates a new reporter under the given parent scope. +func GetHealthReporter(parent Scope, name string) HealthReporter { + if parent == nil { + return &noopReporter{} + } + return getSubReporter(parent, name, true) +} + +// TestScope exposes creating a root scope for testing purposes only. +func TestScope() Scope { + return TestScopeFromProvider(FullModuleID{"test"}, NewHealthProvider()) +} + +// TestScope exposes creating a root scope from a health provider for testing purposes only. +func TestScopeFromProvider(moduleID FullModuleID, hp Health) Scope { + s := rootScope(moduleID, hp.forModule(moduleID)) + s.start() + return s +} + +func rootScope(id FullModuleID, hr statusNodeReporter) *scope { + r := &subReporter{ + base: &subreporterBase{ + hr: hr, + idToChildren: map[string]children{}, + nodes: map[string]*node{}, + wakeup: make(chan struct{}, 16), + }, + } + // create root node, required in case reporters are created without any subscopes. + r.id = r.base.addChild("", id.String(), false) + r.base.rootID = r.id + + // Realize walks the tree and creates a updated status for the reporter. + // Because this is blocking and can be expensive, we have a reconcile loop + // that only performs this if the revision has changed. 
+ realize := func() { + if r.base.stopped { + return + } + statusTree := r.base.getStatusTreeLocked(r.base.rootID) + if r.base.stopped { + r.base.hr.setStatus(statusTree) + return + } + if r.base.revision.Load() == 0 { + return + } + r.base.hr.setStatus(statusTree) + } + + r.scheduleRealize = func() { + r.base.revision.Add(1) + r.base.wakeup <- struct{}{} + } + r.realizeSync = realize + + return &scope{subReporter: r} +} + +func (r *scope) start() { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + var rev uint64 + var lastUpdate time.Time + for { + select { + case <-inctimer.After(reporterMinTimeout): + case <-r.base.wakeup: + case <-ctx.Done(): + } + if rev < r.base.revision.Load() && time.Since(lastUpdate) > reporterMinTimeout { + rev = r.base.revision.Load() + r.base.Lock() + r.realizeSync() + r.base.Unlock() + lastUpdate = time.Now() + } + if ctx.Err() != nil { + return + } + } + }() + r.closeReconciler = cancel +} + +// Flushes out any remaining unprocessed updates and closes the reporter tree. +// Used to finalize any remaining status updates before the module is stopped. +// This will allow for collecting of health status during shutdown. +func flushAndClose(rs Scope, reason string) { + rs.scope().base.Lock() + defer rs.scope().base.Unlock() + + // Stop reconciler loop, flush any pending updates synchronously. + rs.scope().closeReconciler() + + // Realize and flush the final status. + rs.scope().realizeSync() + + // Mark the module as stopped, and emit a stopped status. 
+ rs.scope().base.stopped = true + rs.scope().base.hr.setStatus(&StatusNode{ + ID: rs.scope().base.rootID, + LastLevel: StatusStopped, + Message: reason, + }) + rs.scope().base.removeTreeLocked(rs.scope().id) +} + +type scope struct { + *subReporter +} + +func (s *scope) Close() { + s.base.Lock() + s.base.removeRefLocked(s.id) + s.base.removeTreeLocked(s.id) + s.base.Unlock() + s.scheduleRealize() +} + +// A scope can be removed if it has no references and all child scopes can be removed. +// Reporter leaf nodes are always immediately removed when Stopped. Thus if the condition +// holds that all subtrees of the scope can be removed, then the scope can be removed. +func (s *subreporterBase) canRemoveTreeLocked(id string) bool { + if _, ok := s.nodes[id]; ok { + node := s.nodes[id] + if (node.isReporter) || s.nodes[id].refs > 0 { + return false + } + for child := range s.idToChildren[id] { + if !s.canRemoveTreeLocked(child) { + return false + } + } + } + // If it does not exist, we assume it's ok to remove (noop). + return true +} + +func (s *scope) scope() *subReporter { + return s.subReporter +} + +func (s *scope) Name() string { + return s.name +} + +// When a scope is orphaned and garbage collected, we want to remove it from the tree if +// the following condition is met: +// 1. All scopes under this scope also have no references. +// 2. There are no reporters under this scope (reporters are always removed immediately +// after they are stopped). +// +// This means that scopes are only kept in the tree if they either have referenced subscopes, +// or if they have reporters under them. +// If a scope is orphaned, and all it's children are orphaned, and it has no reporter children +// then it is impossible for any new reporters to be created under this scope. +// +// Because reporters are only removed when they are explicitly stopped, this means that if a +// reporter node emits a ok/degraded status and then is orphaned. 
+// This is ok, because we're primarily interested in ensure that ephemerally created scopes that +// are never reported upon and then lost do not grow the tree indefinitely. +func createSubScope(parent Scope, name string) *scope { + s := &scope{ + subReporter: getSubReporter(parent, name, false), + } + runtime.SetFinalizer(s, func(s *scope) { + s.base.Lock() + s.base.removeRefLocked(s.id) + if s.base.canRemoveTreeLocked(s.id) { + s.base.removeTreeLocked(s.id) + } + s.base.Unlock() + runtime.SetFinalizer(s, nil) + }) + return s +} + +func getSubReporter(parent Scope, name string, isReporter bool) *subReporter { + return scopeFromParent(parent, name, isReporter) +} + +func scopeFromParent(parent Scope, name string, isReporter bool) *subReporter { + r := parent.scope() + r.base.Lock() + defer r.base.Unlock() + + // If such a reporter already exists at this scope, we just return the same reporter + // by recreating the subreporter. + for cid := range r.base.idToChildren[r.id] { + child := r.base.nodes[cid] + if child.name == name { + r.base.addRefLocked(cid) + return &subReporter{ + base: r.base, + id: cid, + scheduleRealize: r.scheduleRealize, + name: name, + } + } + } + + id := r.base.addChild(r.id, name, isReporter) + + return &subReporter{ + base: r.base, + id: id, + scheduleRealize: r.scheduleRealize, + name: name, + } +} + +// subreporterBase is the base implementation of a structured health reporter. +// Each node in a reporter tree (i.e. for each cell.Module) has a pointer to +// the single subreporterBase. +// subreporterBase maintains the tree structure, as well as is responsible for +// realizing the status tree, and emitting the status to the module HealthReporter. +type subreporterBase struct { + lock.Mutex + + // Module level health reporter, all realized status is emitted to this reporter. + hr statusNodeReporter + + // idToChildren is the adjacency map of parentID to children IDs. 
+ idToChildren map[string]children + nodes map[string]*node + + // rootID is the root node of the tree, it should always exist in idToChildren and nodes. + rootID string + + stopped bool + + // Variables used for realization loop, because realization involves traversing the tree + // we only perform this when the revision has changed. + revision atomic.Uint64 + counter atomic.Int32 + wakeup chan struct{} +} + +func (s *subreporterBase) addNode(n *node) { + if _, ok := s.idToChildren[n.parentID]; !ok { + s.idToChildren[n.parentID] = children{} + } + s.idToChildren[n.parentID][n.id] = struct{}{} + s.idToChildren[n.id] = children{} + s.nodes[n.id] = n +} + +func (s *subreporterBase) addChild(pid string, name string, isReporter bool) string { + id := strconv.Itoa(int(s.counter.Add(1))) + "-" + name + s.addNode(&node{ + id: id, + parentID: pid, + count: 1, + nodeUpdate: nodeUpdate{ + Level: StatusUnknown, + Timestamp: time.Now(), + }, + name: name, + isReporter: isReporter, + refs: 1, + }) + return id +} + +var errReporterStopped = errors.New("reporter has been stopped") + +func (s *subreporterBase) setStatus(id string, level Level, message string, err error) error { + s.Lock() + defer s.Unlock() + + if s.stopped { + return fmt.Errorf("reporter tree %s has been stopped", id) + } + + if _, ok := s.nodes[id]; !ok { + return fmt.Errorf("could not set status for reporter %s: %w", id, errReporterStopped) + } + + n := s.nodes[id] + + if n.Level == level && n.Message == message { + n.count++ + } else { + n.count = 1 + } + + n.Level = level + n.Message = message + n.Error = err + return nil +} + +func (s *subreporterBase) removeTreeLocked(rid string) { + for child := range s.idToChildren[rid] { + s.removeTreeLocked(child) + } + // Safely remove parents reference to this node. 
+ if _, ok := s.nodes[rid]; ok { + pid := s.nodes[rid].parentID + delete(s.idToChildren[pid], rid) + } + delete(s.idToChildren, rid) + delete(s.nodes, rid) +} + +// StatusNode is a model struct for a status tree realization result. +// It is created upon status tree realization, for now it is only used for +// for generating a plaintext representation of the status tree. +// In the future we will want to use this to generate a structured JSON representation +// of the status tree. +type StatusNode struct { + ID string `json:"id"` + LastLevel Level `json:"level,omitempty"` + Name string `json:"name"` + Message string `json:"message,omitempty"` + UpdateTimestamp time.Time `json:"timestamp"` + Count int `json:"count"` + SubStatuses []*StatusNode `json:"sub_statuses,omitempty"` + Error string `json:"error,omitempty"` +} + +var _ Update = (*StatusNode)(nil) + +func (s *StatusNode) Level() Level { + return s.LastLevel +} + +func (s *StatusNode) Timestamp() time.Time { + return s.UpdateTimestamp +} + +func (s *StatusNode) JSON() ([]byte, error) { + return json.MarshalIndent(s, "", " ") +} + +func (s *StatusNode) allOk() bool { + return s.LastLevel == StatusOK +} + +func (s *StatusNode) writeTo(w io.Writer, d int) { + if len(s.SubStatuses) == 0 { + since := "never" + if !s.UpdateTimestamp.IsZero() { + since = duration.HumanDuration(time.Since(s.UpdateTimestamp)) + " ago" + } + fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t(x%d)\n", strings.Repeat("\t", d), s.Name, s.LastLevel, s.Message, since, s.Count) + } else { + fmt.Fprintf(w, "%s%s\n", strings.Repeat("\t", d), s.Name) + for _, ss := range s.SubStatuses { + ss.writeTo(w, d+1) + } + } +} + +func (s *StatusNode) StringIndent(ident int) string { + if s == nil { + return "" + } + buf := bytes.NewBuffer(nil) + w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + s.writeTo(w, ident) + w.Flush() + return buf.String() +} + +func (s *StatusNode) String() string { + return s.Message +} + +func (s *subreporterBase) getStatusTreeLocked(nid 
string) *StatusNode { + if children, ok := s.idToChildren[nid]; ok { + rn := s.nodes[nid] + n := &StatusNode{ + ID: nid, + Message: rn.Message, + Name: rn.name, + UpdateTimestamp: rn.Timestamp, + Count: rn.count, + } + if err := rn.Error; err != nil { + n.Error = err.Error() + } + allok := true + childIDs := maps.Keys(children) + sort.Strings(childIDs) + for _, child := range childIDs { + cn := s.getStatusTreeLocked(child) + if cn == nil { + log.Errorf("failed to get status for node %s", child) + continue + } + n.SubStatuses = append(n.SubStatuses, cn) + if !cn.allOk() { + allok = false + } + } + // If this is not a leaf and all children are ok then report ok. + // case 1: Non-reporter, has no children, should be ok? + // case 2: Non-reporter, has children, defer down to children. + if rn.isReporter { + n.LastLevel = rn.Level + } else { + if allok { + n.LastLevel = StatusOK + } else { + n.LastLevel = StatusDegraded + } + } + + return n + } + return nil +} + +type node struct { + id string + name string + parentID string + isReporter bool + count int + refs int + Message string + Error error + nodeUpdate +} +type nodeUpdate struct { + Level + Timestamp time.Time +} + +func (b *subreporterBase) removeRefLocked(id string) { + if _, ok := b.nodes[id]; ok { + if b.nodes[id].refs > 0 { + b.nodes[id].refs-- + } + } +} + +func (b *subreporterBase) addRefLocked(id string) { + if _, ok := b.nodes[id]; ok { + b.nodes[id].refs++ + } +} + +// subReporter represents both reporter "leaf" nodes and intermediate +// "scope" nodes. +// subReporter only has a pointer to the base, thus copying a subReporter +// by value yields the same "reporter". +type subReporter struct { + base *subreporterBase + // Triggers realization asynchronously, should not hold lock when calling. + scheduleRealize func() + // Triggers realization synchronously, base lock must be held when calling. + // Use for final status flushes. 
+ realizeSync func() + + closeReconciler func() + id string + name string +} + +const logReporterID = "reporterID" + +func (s *subReporter) OK(message string) { + if err := s.base.setStatus(s.id, StatusOK, message, nil); err != nil { + if errors.Is(err, errReporterStopped) { + log.WithError(err).WithField(logReporterID, s.id).Debug("could not set OK status on subreporter") + } else { + log.WithError(err).WithField(logReporterID, s.id).Warn("could not set OK status on subreporter") + } + return + } + + s.scheduleRealize() +} + +func (s *subReporter) Degraded(message string, err error) { + if err := s.base.setStatus(s.id, StatusDegraded, message, err); err != nil { + if errors.Is(err, errReporterStopped) { + log.WithError(err).WithField(logReporterID, s.id).Debug("could not set degraded status on subreporter") + } else { + log.WithError(err).WithField(logReporterID, s.id).Warn("could not set degraded status on subreporter") + } + return + } + s.scheduleRealize() +} + +// Stopped marks the subreporter as stopped by removing it from the tree. +// Stopped reporters can immediately be removed from the tree, since they do +// not have any children. 
+func (s *subReporter) Stopped(message string) { + s.base.Lock() + s.base.removeTreeLocked(s.id) + s.base.Unlock() + s.scheduleRealize() +} + +type noopReporter struct{} + +func (s *noopReporter) OK(message string) {} +func (s *noopReporter) Degraded(message string, err error) {} +func (s *noopReporter) Stopped(message string) {} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/hive.go b/vendor/github.com/cilium/cilium/pkg/hive/hive.go index df9e7426b2..e81a64ea42 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/hive.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/hive.go @@ -5,6 +5,7 @@ package hive import ( "context" + "errors" "fmt" "os" "os/signal" @@ -17,9 +18,9 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" "go.uber.org/dig" - "go.uber.org/multierr" "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/hive/metrics" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" ) @@ -54,7 +55,7 @@ type Hive struct { startTimeout, stopTimeout time.Duration flags *pflag.FlagSet viper *viper.Viper - lifecycle *DefaultLifecycle + lifecycle cell.Lifecycle populated bool invokes []func() error configOverrides []any @@ -78,7 +79,7 @@ func New(cells ...cell.Cell) *Hive { startTimeout: defaultStartTimeout, stopTimeout: defaultStopTimeout, flags: pflag.NewFlagSet("", pflag.ContinueOnError), - lifecycle: &DefaultLifecycle{}, + lifecycle: &cell.DefaultLifecycle{}, shutdown: make(chan error, 1), configOverrides: nil, } @@ -87,12 +88,28 @@ func New(cells ...cell.Cell) *Hive { log.WithError(err).Fatal("Failed to provide default objects") } + if err := metrics.Cell.Apply(h.container); err != nil { + log.WithError(err).Fatal("Failed to apply Hive metrics cell") + } + // Use a single health provider for all cells, which is used to create // module scoped health reporters. 
- if err := h.container.Provide(func(lc Lifecycle) cell.Health { + if err := h.container.Provide(func(healthMetrics *metrics.HealthMetrics, lc cell.Lifecycle) cell.Health { hp := cell.NewHealthProvider() - lc.Append(Hook{ - OnStop: func(ctx HookContext) error { + updateStats := func() { + for l, c := range hp.Stats() { + healthMetrics.HealthStatusGauge.WithLabelValues(strings.ToLower(string(l))).Set(float64(c)) + } + } + lc.Append(cell.Hook{ + OnStart: func(ctx cell.HookContext) error { + updateStats() + hp.Subscribe(ctx, func(u cell.Update) { + updateStats() + }, func(err error) {}) + return nil + }, + OnStop: func(ctx cell.HookContext) error { return hp.Stop(ctx) }, }) @@ -146,21 +163,23 @@ func (h *Hive) Viper() *viper.Viper { type defaults struct { dig.Out - Flags *pflag.FlagSet - Lifecycle Lifecycle - Logger logrus.FieldLogger - Shutdowner Shutdowner - InvokerList cell.InvokerList + Flags *pflag.FlagSet + Lifecycle cell.Lifecycle + Logger logrus.FieldLogger + Shutdowner Shutdowner + InvokerList cell.InvokerList + EmptyFullModuleID cell.FullModuleID } func (h *Hive) provideDefaults() error { return h.container.Provide(func() defaults { return defaults{ - Flags: h.flags, - Lifecycle: h.lifecycle, - Logger: log, - Shutdowner: h, - InvokerList: h, + Flags: h.flags, + Lifecycle: h.lifecycle, + Logger: log, + Shutdowner: h, + InvokerList: h, + EmptyFullModuleID: nil, } }) } @@ -187,27 +206,23 @@ func (h *Hive) Run() error { startCtx, cancel := context.WithTimeout(context.Background(), h.startTimeout) defer cancel() - var errors []error - + var errs error if err := h.Start(startCtx); err != nil { - errors = append(errors, fmt.Errorf("failed to start: %w", err)) + errs = errors.Join(errs, fmt.Errorf("failed to start: %w", err)) } // If start was successful, wait for Shutdown() or interrupt. 
- if len(errors) == 0 { - shutdownErr := h.waitForSignalOrShutdown() - if shutdownErr != nil { - errors = append(errors, shutdownErr) - } + if errs == nil { + errs = errors.Join(errs, h.waitForSignalOrShutdown()) } stopCtx, cancel := context.WithTimeout(context.Background(), h.stopTimeout) defer cancel() if err := h.Stop(stopCtx); err != nil { - errors = append(errors, fmt.Errorf("failed to stop: %w", err)) + errs = errors.Join(errs, fmt.Errorf("failed to stop: %w", err)) } - return multierr.Combine(errors...) + return errs } func (h *Hive) waitForSignalOrShutdown() error { @@ -317,11 +332,9 @@ func (h *Hive) fatalOnTimeout(ctx context.Context) chan struct{} { // Context was cancelled. Give 5 more seconds and then // go fatal. - time.Sleep(5 * time.Second) - select { case <-terminated: - default: + case <-time.After(5 * time.Second): log.Fatal("Start or stop failed to finish on time, aborting forcefully.") } }() diff --git a/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go b/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go index c216e41db0..6fef8842e9 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go @@ -5,7 +5,8 @@ package internal import ( "fmt" - "path" + "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -13,7 +14,7 @@ import ( ) var ( - baseNameRegex = regexp.MustCompile(`^github\.com/cilium/cilium/[\w\/]+/`) + baseNameRegex = regexp.MustCompile(`github\.com/cilium/cilium/[\w\/]+/`) ) func TrimName(name string) string { @@ -30,7 +31,25 @@ func FuncNameAndLocation(fn any) string { name := TrimName(f.Name()) name = strings.TrimSuffix(name, "-fm") if file != "" { - return fmt.Sprintf("%s (%s:%d)", name, path.Base(file), line) + return fmt.Sprintf("%s (%s:%d)", name, usefulPathSegment(file), line) } return name } + +// Purely a heuristic. 
+var commonRoots = map[string]struct{}{ + "pkg": {}, + "cmd": {}, +} + +func usefulPathSegment(file string) string { + p := filepath.Clean(file) + segs := strings.Split(p, string(os.PathSeparator)) + for i := len(segs) - 1; i > 0; i-- { + if _, ok := commonRoots[segs[i]]; ok { + segs = segs[i:] + break + } + } + return filepath.Join(segs...) +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/metrics/health.go b/vendor/github.com/cilium/cilium/pkg/hive/metrics/health.go new file mode 100644 index 0000000000..84d72b1e9f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hive/metrics/health.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "github.com/cilium/cilium/pkg/hive/cell" + "github.com/cilium/cilium/pkg/metrics/metric" +) + +var Cell = cell.Metric(newHealthMetrics) + +type HealthMetrics struct { + HealthStatusGauge metric.Vec[metric.Gauge] +} + +func newHealthMetrics() *HealthMetrics { + return &HealthMetrics{ + HealthStatusGauge: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: "hive_health_status_levels", + Namespace: "cilium", + Subsystem: "hive", + Name: "status", + Help: "Counts of health status levels of Hive components", + }, []string{"status"}), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go b/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go new file mode 100644 index 0000000000..072c02a56e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/inctimer/inctimer.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package inctimer + +import "time" + +// IncTimer should be the preferred mechanism over +// calling `time.After` when wanting an `After`-like +// function in a loop. This prevents memory build up +// as the `time.After` method creates a new timer +// instance every time it is called, and it is not +// garbage collected until after it fires. 
Conversely, +// IncTimer only uses one timer and correctly stops +// the timer, clears its channel, and resets it +// everytime that `After` is called. +type IncTimer interface { + After(time.Duration) <-chan time.Time +} + +type incTimer struct { + t *time.Timer +} + +// New creates a new IncTimer and a done function. +// IncTimer only uses one timer and correctly stops +// the timer, clears the channel, and resets it every +// time the `After` function is called. +// WARNING: Concurrent use is not expected. The use +// of this timer should be for only one goroutine. +func New() (IncTimer, func() bool) { + it := &incTimer{} + return it, it.stop +} + +// stop returns true if a scheduled timer has been stopped before execution. +func (it *incTimer) stop() bool { + if it.t == nil { + return false + } + return it.t.Stop() +} + +// After returns a channel that will fire after +// the specified duration. +func (it *incTimer) After(d time.Duration) <-chan time.Time { + // Stop the previous timer (if any) to garbage collect it. + // The old timer channel will be garbage collected even if not drained. + it.stop() + + // We have to create a new timer for each invocation, because it is not + // possible to safely use https://golang.org/pkg/time/#Timer.Reset if we + // do not know if the timer channel has already been drained or not (which + // is the case here, as the client might have drained the channel already). + // Even after stopping a timer, it's not safe to attempt to drain its + // timer channel with a default case (for the case where the client has + // drained the channel already), as there is a small window where a timer + // is considered expired, but the channel has not received a value yet [1]. + // This would cause us to erroneously take the default case (assuming the + // channel has been drained by the client), when in fact the channel just + // has not received a value yet. Because the two cases (client has drained + // vs. 
value not received yet) are indistinguishable for us, we cannot use + // Timer.Reset and need to create a new timer. + // + // [1] The reason why this small window occurs, is because the Go runtime + // will remove a timer from the heap and and mark it as deleted _before_ + // it actually executes the timer function f: + // https://github.com/golang/go/blob/go1.16/src/runtime/time.go#L876 + // This causes t.Stop to report the timer as already expired while it is + // in fact currently running: + // https://github.com/golang/go/blob/go1.16/src/runtime/time.go#L352 + it.t = time.NewTimer(d) + return it.t.C +} + +// After wraps the time.After function to get around the /timeafter linter +// warning for cases where it is inconvenient to use the instantiated version. +func After(d time.Duration) <-chan time.Time { + return time.After(d) +} diff --git a/vendor/github.com/cilium/cilium/pkg/ip/ip.go b/vendor/github.com/cilium/cilium/pkg/ip/ip.go index 14ef09eb14..fe1bb97df2 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/ip.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/ip.go @@ -739,22 +739,6 @@ func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, [ return left, excludeList, right } -// KeepUniqueIPs transforms the provided multiset of IPs into a single set, -// lexicographically sorted via a byte-wise comparison of the IP slices (i.e. -// IPv4 addresses show up before IPv6). -// The slice is manipulated in-place destructively. -func KeepUniqueIPs(ips []net.IP) []net.IP { - return slices.SortedUniqueFunc( - ips, - func(i, j int) bool { - return bytes.Compare(ips[i], ips[j]) == -1 - }, - func(a, b net.IP) bool { - return a.Equal(b) - }, - ) -} - // KeepUniqueAddrs transforms the provided multiset of IP addresses into a // single set, lexicographically sorted via comparison of the addresses using // netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6). 
@@ -859,25 +843,33 @@ func SortIPList(ipList []net.IP) { }) } +func SortAddrList(ipList []netip.Addr) { + sort.Slice(ipList, func(i, j int) bool { + return ipList[i].Compare(ipList[j]) < 0 + }) +} + // getSortedIPList returns a new net.IP slice in which the IPs are sorted. func getSortedIPList(ipList []net.IP) []net.IP { sortedIPList := make([]net.IP, len(ipList)) - for i := 0; i < len(ipList); i++ { - sortedIPList[i] = ipList[i] - } - + copy(sortedIPList, ipList) SortIPList(sortedIPList) + return sortedIPList } -// SortedIPListsAreEqual compares two lists of sorted IPs. If any differ it returns -// false. -func SortedIPListsAreEqual(a, b []net.IP) bool { +// UnsortedIPListsAreEqual returns true if the list of net.IP provided is same +// without considering the order of the IPs in the list. The function will first +// attempt to sort both the IP lists and then validate equality for sorted lists. +func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool { // The IP set is definitely different if the lengths are different. - if len(a) != len(b) { + if len(ipList1) != len(ipList2) { return false } + a := getSortedIPList(ipList1) + b := getSortedIPList(ipList2) + // Lengths are equal, so each member in one set must be in the other // If any IPs at the same index differ the sorted IP list are not equal. for i := range a { @@ -888,21 +880,6 @@ func SortedIPListsAreEqual(a, b []net.IP) bool { return true } -// UnsortedIPListsAreEqual returns true if the list of net.IP provided is same -// without considering the order of the IPs in the list. The function will first -// attempt to sort both the IP lists and then validate equality for sorted lists. -func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool { - // The IP set is definitely different if the lengths are different. 
- if len(ipList1) != len(ipList2) { - return false - } - - sortedIPList1 := getSortedIPList(ipList1) - sortedIPList2 := getSortedIPList(ipList2) - - return SortedIPListsAreEqual(sortedIPList1, sortedIPList2) -} - // GetIPFromListByFamily returns a single IP address of the provided family from a list // of ip addresses. func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP { diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go index 2d82d82bce..90f2b9bb7c 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go @@ -23,9 +23,6 @@ const ( // option.IPAM IPAMClusterPool = "cluster-pool" - // IPAMClusterPoolV2 is the value to select cluster pool version 2 - IPAMClusterPoolV2 = "cluster-pool-v2beta" - // IPAMMultiPool is the value to select the multi pool IPAM mode IPAMMultiPool = "multi-pool" @@ -49,6 +46,3 @@ const ( // prefixes. Every /28 prefix contains 16 IP addresses. // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html#ec2-prefix-basics for more details const ENIPDBlockSizeIPv4 = 16 - -// PoolDefault is the default IP pool from which to allocate. 
-const PoolDefault = "default" diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go index e85b7fa0d5..1e50603073 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go @@ -5,6 +5,7 @@ package types import ( "fmt" + "net/netip" "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/lock" @@ -56,6 +57,19 @@ type AllocationMap map[string]AllocationIP // +kubebuilder:validation:Format=cidr type IPAMPodCIDR string +func (c *IPAMPodCIDR) ToPrefix() (*netip.Prefix, error) { + if c == nil { + return nil, fmt.Errorf("nil ipam cidr") + } + + prefix, err := netip.ParsePrefix(string(*c)) + if err != nil { + return nil, fmt.Errorf("failed to parse ipam cidr %v: %w", c, err) + } + + return &prefix, nil +} + // IPAMPoolAllocation describes an allocation of an IPAM pool from the operator to the // node. It contains the assigned PodCIDRs allocated from this pool type IPAMPoolAllocation struct { @@ -157,25 +171,6 @@ type IPAMSpec struct { // // +kubebuilder:validation:Minimum=0 MaxAboveWatermark int `json:"max-above-watermark,omitempty"` - - // PodCIDRAllocationThreshold defines the minimum number of free IPs which - // must be available to this node via its pod CIDR pool. If the total number - // of IP addresses in the pod CIDR pool is less than this value, the pod - // CIDRs currently in-use by this node will be marked as depleted and - // cilium-operator will allocate a new pod CIDR to this node. - // This value effectively defines the buffer of IP addresses available - // immediately without requiring cilium-operator to get involved. - // - // +kubebuilder:validation:Minimum=0 - PodCIDRAllocationThreshold int `json:"pod-cidr-allocation-threshold,omitempty"` - - // PodCIDRReleaseThreshold defines the maximum number of free IPs which may - // be available to this node via its pod CIDR pool. 
While the total number - // of free IP addresses in the pod CIDR pool is larger than this value, - // cilium-agent will attempt to release currently unused pod CIDRs. - // - // +kubebuilder:validation:Minimum=0 - PodCIDRReleaseThreshold int `json:"pod-cidr-release-threshold,omitempty"` } // IPReleaseStatus defines the valid states in IP release handshake @@ -366,6 +361,9 @@ type Interface interface { // ForeachAddress must iterate over all addresses of the interface and // call fn for each address ForeachAddress(instanceID string, fn AddressIterator) error + + // DeepCopyInterface returns a deep copy of the underlying interface type. + DeepCopyInterface() Interface } // InterfaceRevision is the configurationr revision of a network interface. It @@ -410,6 +408,13 @@ func NewInstanceMap() *InstanceMap { return &InstanceMap{data: map[string]*Instance{}} } +// UpdateInstance updates the interfaces map for a particular instance. +func (m *InstanceMap) UpdateInstance(instanceID string, instance *Instance) { + m.mutex.Lock() + m.data[instanceID] = instance + m.mutex.Unlock() +} + // Update updates the definition of an interface for a particular instance. If // the interface is already known, the definition is updated, otherwise the // interface is added to the instance. 
@@ -540,6 +545,7 @@ func (m *InstanceMap) DeepCopy() *InstanceMap { c := NewInstanceMap() m.ForeachInterface("", func(instanceID, interfaceID string, rev InterfaceRevision) error { // c is not exposed yet, we can access it without locking it + rev.Resource = rev.Resource.DeepCopyInterface() c.updateLocked(instanceID, rev) return nil }) diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go index b27fd1c459..9749444d65 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go @@ -204,12 +204,6 @@ func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool { if in.MaxAboveWatermark != other.MaxAboveWatermark { return false } - if in.PodCIDRAllocationThreshold != other.PodCIDRAllocationThreshold { - return false - } - if in.PodCIDRReleaseThreshold != other.PodCIDRReleaseThreshold { - return false - } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go index aa9c0318b1..4aa8ac5169 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go @@ -61,10 +61,29 @@ const ( // documentation add the label for every resource object. AppKubernetes = "app.kubernetes.io" + // StatefulSetPodNameLabel is the label name which, in-tree, is used to + // automatically label Pods that are owned by StatefulSets with their name, + // so that one can attach a Service to a specific Pod in the StatefulSet. + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" + + // StatefulSetPodIndexLabel is the label name which, in-tree, is used to + // automatically label Pods that are owned by StatefulSets with their + // ordinal index. 
+ StatefulSetPodIndexLabel = "apps.kubernetes.io/pod-index" + + // IndexedJobCompletionIndexLabel is the label name which, in-tree, is used + // to automatically label Pods that are owned by Indexed Jobs with their + // completion index. + IndexedJobCompletionIndexLabel = "batch.kubernetes.io/job-completion-index" + // CtrlPrefixPolicyStatus is the prefix used for the controllers set up // to sync the CNP with kube-apiserver. CtrlPrefixPolicyStatus = "sync-cnp-policy-status" + // BatchJobControllerUID is one of the labels that is available on a Job + // https://kubernetes.io/docs/concepts/workloads/controllers/job/#job-labels + BatchJobControllerUID = "batch.kubernetes.io/controller-uid" + // CiliumIdentityAnnotationDeprecated is the previous annotation key used to map to an endpoint's security identity. CiliumIdentityAnnotationDeprecated = "cilium-identity" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go index 7585346d12..f4937a4a7f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go @@ -15,5 +15,5 @@ const ( // // Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release // Developers: Bump patch for each change in the CRD schema. 
- CustomResourceDefinitionSchemaVersion = "1.26.10" + CustomResourceDefinitionSchemaVersion = "1.28.3" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go index 08fb21a63a..28cb2e19ea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go @@ -82,14 +82,16 @@ func getEndpointSelector(namespace string, labelSelector *slim_metav1.LabelSelec // Those pods don't have any labels, so they don't have a namespace label either. // Don't add a namespace label to those endpoint selectors, or we wouldn't be // able to match on those pods. - if !matchesInit && !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) { + if !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) { if namespace == "" { // For a clusterwide policy if a namespace is not specified in the labels we add // a selector to only match endpoints that contains a namespace label. // This is to make sure that we are only allowing traffic for cilium managed k8s endpoints // and even if a wildcard is provided in the selector we don't proceed with a truly // empty(allow all) endpoint selector for the policy. - es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{}) + if !matchesInit { + es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{}) + } } else { es.AddMatch(podPrefixLbl, namespace) } @@ -301,11 +303,11 @@ func ParseToCiliumRule(namespace, name string, uid types.UID, r *api.Rule) *api. // the policy is being stored, thus we add the namespace to // the MatchLabels map. // - // Policies applying on initializing pods are a special case. - // Those pods don't have any labels, so they don't have a namespace label either. - // Don't add a namespace label to those endpoint selectors, or we wouldn't be - // able to match on those pods. 
- if !retRule.EndpointSelector.HasKey(podInitLbl) && namespace != "" { + // Policies applying to all namespaces are a special case. + // Such policies can match on any traffic from Pods or Nodes, + // so it wouldn't make sense to inject a namespace match for + // those policies. + if namespace != "" { userNamespace, present := r.EndpointSelector.GetMatch(podPrefixLbl) if present && !namespacesAreValid(namespace, userNamespace) { log.WithFields(logrus.Fields{ diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go index 8628e7fdc1..48823bf8be 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go @@ -23,7 +23,6 @@ type CiliumClusterwideEnvoyConfig struct { metav1.ObjectMeta `json:"metadata"` // +k8s:openapi-gen=false - // +kubebuilder:validation:Type=object Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go index 12eb1312aa..ffff813a40 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go @@ -32,7 +32,6 @@ type CiliumEnvoyConfig struct { metav1.ObjectMeta `json:"metadata"` // +k8s:openapi-gen=false - // +kubebuilder:validation:Type=object Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go index 35f15d905f..8d6081182e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go @@ -47,8 +47,6 @@ type CiliumExternalWorkload struct { // 
CiliumExternalWorkloadSpec specifies the configurations for redirecting traffic // within a workload. -// -// +kubebuilder:validation:Type=object type CiliumExternalWorkloadSpec struct { // IPv4AllocCIDR is the range of IPv4 addresses in the CIDR format that the external workload can // use to allocate IP addresses for the tunnel device and the health endpoint. diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go index aafc5d40a4..63bb1b5e1f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go @@ -151,8 +151,6 @@ type RedirectBackend struct { // CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic // within a node. -// -// +kubebuilder:validation:Type=object type CiliumLocalRedirectPolicySpec struct { // RedirectFrontend specifies frontend configuration to redirect traffic from. // It can not be empty. @@ -176,8 +174,6 @@ type CiliumLocalRedirectPolicySpec struct { // CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy. 
type CiliumLocalRedirectPolicyStatus struct { // TODO Define status(aditi) - // - // +kubebuilder:validation:Type=object OK bool `json:"ok,omitempty"` } @@ -208,7 +204,7 @@ func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb } else { p, err := strconv.ParseUint(pInfo.Port, 0, 16) if err != nil { - return pInt, pName, protocol, fmt.Errorf("unable to parse port: %v", err) + return pInt, pName, protocol, fmt.Errorf("unable to parse port: %w", err) } if p == 0 { return pInt, pName, protocol, fmt.Errorf("port cannot be 0") diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go index af821e8dfb..4a342e9422 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go @@ -4,6 +4,7 @@ package v2 import ( + "net" "sort" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,11 +21,10 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=false // +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep} -// +kubebuilder:printcolumn:JSONPath=".status.id",description="Cilium endpoint id",name="Endpoint ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Cilium identity id",name="Identity ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string +// 
+kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer +// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string,priority=1 // +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string @@ -347,6 +347,11 @@ type NodeSpec struct { // some other means of identification. InstanceID string `json:"instance-id,omitempty"` + // BootID is a unique node identifier generated on boot + // + // +kubebuilder:validation:Optional + BootID string `json:"bootid,omitempty"` + // Addresses is the list of all node addresses. // // +kubebuilder:validation:Optional @@ -465,3 +470,22 @@ func (n *CiliumNode) InstanceID() (instanceID string) { } return } + +func (n NodeAddress) ToString() string { + return n.IP +} + +func (n NodeAddress) AddrType() addressing.AddressType { + return n.Type +} + +// GetIP returns one of the CiliumNode's IP addresses available with the +// following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if GetIP fails to extract an IP from the CiliumNode +// based on the provided address family. 
+func (n *CiliumNode) GetIP(ipv6 bool) net.IP { + return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go index 7c2a855220..1f363e5767 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go @@ -1070,6 +1070,9 @@ func (in *NodeSpec) DeepEqual(other *NodeSpec) bool { if in.InstanceID != other.InstanceID { return false } + if in.BootID != other.BootID { + return false + } if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) { in, other := &in.Addresses, &other.Addresses if other == nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go new file mode 100644 index 0000000000..af4e4d57c0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// BGPAdvertisementType defines type of advertisement. +// +// Note list of supported advertisements is not exhaustive and can be extended in the future. +// Consumer of this API should be able to handle unknown values. +// +// +kubebuilder:validation:Enum=PodCIDR;CiliumPodIPPool;CiliumLoadBalancerIP +type BGPAdvertisementType string + +const ( + // PodCIDRAdvert when configured, Cilium will advertise pod CIDRs to BGP peers. 
+ PodCIDRAdvert BGPAdvertisementType = "PodCIDR" + + // CiliumPodIPPoolAdvert when configured, Cilium will advertise prefixes from CiliumPodIPPools to BGP peers. + CiliumPodIPPoolAdvert BGPAdvertisementType = "CiliumPodIPPool" + + // CiliumLoadBalancerIPAdvert when configured, Cilium will advertise load balancer services IPs to BGP peers. + // The loadBalancerClass for a service must be nil or specify a class supported by Cilium, + // e.g. "io.cilium/bgp-control-plane". + // + // Refer to the following document for additional details regarding load balancer + // classes: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + CiliumLoadBalancerIPAdvert BGPAdvertisementType = "CiliumLoadBalancerIP" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpadvertisement",path="ciliumbgpadvertisements",scope="Cluster",shortName={cbgpadvert} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPAdvertisement is the Schema for the ciliumbgpadvertisements API +type CiliumBGPAdvertisement struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + Spec CiliumBGPAdvertisementSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPAdvertisementList contains a list of CiliumBGPAdvertisement +type CiliumBGPAdvertisementList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPAdvertisement. + Items []CiliumBGPAdvertisement `json:"items"` +} + +type CiliumBGPAdvertisementSpec struct { + // Advertisements is a list of BGP advertisements. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Advertisements []Advertisement `json:"advertisements"` +} + +// Advertisement defines which routes Cilium should advertise to BGP peers. Optionally, additional attributes can be +// set to the advertised routes. +type Advertisement struct { + // AdvertisementType defines type of advertisement which has to be advertised. + // + // +kubebuilder:validation:Required + AdvertisementType BGPAdvertisementType `json:"advertisementType"` + + // Selector is a label selector to select objects of the type specified by AdvertisementType. + // If not specified, all objects of the type specified by AdvertisementType are selected for advertisement. + // + // +kubebuilder:validation:Optional + Selector *slimv1.LabelSelector `json:"selector,omitempty"` + + // Attributes defines additional attributes to set to the advertised routes. + // If not specified, no additional attributes are set. + // + // +kubebuilder:validation:Optional + Attributes *CiliumBGPAttributes `json:"attributes,omitempty"` +} + +// CiliumBGPAttributes defines additional attributes to set to the advertised NLRIs. +type CiliumBGPAttributes struct { + // Community sets the community attribute in the route. + // If not specified, no community attribute is set. + // + // +kubebuilder:validation:Optional + Community *BGPCommunities `json:"community,omitempty"` + + // LocalPreference sets the local preference attribute in the route. + // If not specified, no local preference attribute is set. 
+ // + // +kubebuilder:validation:Optional + LocalPreference *int64 `json:"localPreference,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go new file mode 100644 index 0000000000..dad7bb87f2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpclusterconfig",path="ciliumbgpclusterconfigs",scope="Cluster",shortName={cbgpcluster} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPClusterConfig is the Schema for the CiliumBGPClusterConfig API +type CiliumBGPClusterConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec defines the desired cluster configuration of the BGP control plane. + Spec CiliumBGPClusterConfigSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPClusterConfigList is a list of CiliumBGPClusterConfig objects. +type CiliumBGPClusterConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPClusterConfig. 
+ Items []CiliumBGPClusterConfig `json:"items"` +} + +type CiliumBGPClusterConfigSpec struct { + // NodeSelector selects a group of nodes where this BGP Cluster + // config applies. + // If empty / nil this config applies to all nodes. + // + // +kubebuilder:validation:Optional + NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"` + + // A list of CiliumBGPInstance(s) which instructs + // the BGP control plane how to instantiate virtual BGP routers. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPInstance `json:"bgpInstances"` +} + +type CiliumBGPInstance struct { + // Name is the name of the BGP instance. It is a unique identifier for the BGP instance + // within the cluster configuration. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalASN is the ASN of this BGP instance. + // Supports extended 32bit ASNs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + LocalASN *int64 `json:"localASN,omitempty"` + + // Peers is a list of neighboring BGP peers for this virtual router + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPPeer `json:"peers,omitempty"` +} + +type CiliumBGPPeer struct { + // Name is the name of the BGP peer. It is a unique identifier for the peer within the BGP instance. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // Supports IPv4 and IPv6 addresses. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + PeerAddress *string `json:"peerAddress,omitempty"` + + // PeerASN is the ASN of the peer BGP router. + // Supports extended 32bit ASNs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + PeerASN *int64 `json:"peerASN,omitempty"` + + // PeerConfigRef is a reference to a peer configuration resource. + // If not specified, the default BGP configuration is used for this peer. + // + // +kubebuilder:validation:Optional + PeerConfigRef *PeerConfigReference `json:"peerConfigRef,omitempty"` +} + +// PeerConfigReference is a reference to a peer configuration resource. 
+type PeerConfigReference struct { + // Group is the group of the peer config resource. + // If not specified, the default of "cilium.io" is used. + // + // +kubebuilder:validation:Optional + // +kubebuilder:default="cilium.io" + Group string `json:"group"` + + // Kind is the kind of the peer config resource. + // If not specified, the default of "CiliumBGPPeerConfig" is used. + // + // +kubebuilder:validation:Optional + // +kubebuilder:default="CiliumBGPPeerConfig" + Kind string `json:"kind"` + + // Name is the name of the peer config resource. + // Name refers to the name of a Kubernetes object (typically a CiliumBGPPeerConfig). + // + // +kubebuilder:validation:Required + Name string `json:"name"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go new file mode 100644 index 0000000000..4d788cdac5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpnodeconfigoverride",path="ciliumbgpnodeconfigoverrides",scope="Cluster",shortName={cbgpnodeoverride} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPNodeConfigOverride is used to overrides some of the BGP configurations which are node local. +// Users can user this resource to override auto-generated BGP settings for the node. 
+type CiliumBGPNodeConfigOverride struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPNodeConfigOverride. + Spec CiliumBGPNodeConfigOverrideSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPNodeConfigOverrideList is a list of CiliumBGPNodeConfigOverride objects. +type CiliumBGPNodeConfigOverrideList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPNodeConfigOverride. + Items []CiliumBGPNodeConfigOverride `json:"items"` +} + +type CiliumBGPNodeConfigOverrideSpec struct { + // NodeRef is the name of the node for which the BGP configuration is overridden. + // + // +kubebuilder:validation:Required + NodeRef string `json:"nodeRef"` + + // BGPInstances is a list of BGP instances to override. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeConfigInstanceOverride `json:"bgpInstances"` +} + +// CiliumBGPNodeConfigInstanceOverride defines configuration options which can be overridden for a specific BGP instance. +type CiliumBGPNodeConfigInstanceOverride struct { + // Name is the name of the BGP instance for which the configuration is overridden. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // RouterID is BGP router id to use for this instance. It must be unique across all BGP instances. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Format=ipv4 + RouterID *string `json:"routerID,omitempty"` + + // LocalPort is port to use for this BGP instance. 
+ // + // +kubebuilder:validation:Optional + LocalPort *int32 `json:"localPort,omitempty"` + + // Peers is a list of peer configurations to override. + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPNodeConfigPeerOverride `json:"peers,omitempty"` +} + +// CiliumBGPNodeConfigPeerOverride defines configuration options which can be overridden for a specific peer. +type CiliumBGPNodeConfigPeerOverride struct { + // Name is the name of the peer for which the configuration is overridden. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalAddress is the IP address to use for connecting to this peer. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-
5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + LocalAddress *string `json:"localAddress,omitempty"` + + // LocalPort is source port to use for connecting to this peer. + // + // +kubebuilder:validation:Optional + LocalPort *int32 `json:"localPort,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go new file mode 100644 index 0000000000..bf85107e0b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpnodeconfig",path="ciliumbgpnodeconfigs",scope="Cluster",shortName={cbgpnode} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPNodeConfig is node local configuration for BGP agent. Name of the object should be node name. +// This resource will be created by Cilium operator and is read-only for the users. +type CiliumBGPNodeConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPNodeConfig. + Spec CiliumBGPNodeSpec `json:"spec"` + + // Status is the most recently observed status of the CiliumBGPNodeConfig. + Status CiliumBGPNodeStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPNodeConfigList is a list of CiliumBGPNodeConfig objects. 
+type CiliumBGPNodeConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPNodeConfig. + Items []CiliumBGPNodeConfig `json:"items"` +} + +type CiliumBGPNodeSpec struct { + // BGPInstances is a list of BGP router instances on the node. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeInstance `json:"bgpInstances"` +} + +// CiliumBGPNodeInstance is a single BGP router instance configuration on the node. +type CiliumBGPNodeInstance struct { + // Name is the name of the BGP instance. This name is used to identify the BGP instance on the node. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalASN is the ASN of this virtual router. + // Supports extended 32bit ASNs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + LocalASN *int64 `json:"localASN,omitempty"` + + // RouterID is the BGP router ID of this virtual router. + // This configuration is derived from CiliumBGPNodeConfigOverride resource. + // + // If not specified, the router ID will be derived from the node local address. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Format=ipv4 + RouterID *string `json:"routerID,omitempty"` + + // LocalPort is the port on which the BGP daemon listens for incoming connections. + // + // If not specified, BGP instance will not listen for incoming connections. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + LocalPort *int32 `json:"localPort,omitempty"` + + // Peers is a list of neighboring BGP peers for this virtual router + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPNodePeer `json:"peers,omitempty"` +} + +type CiliumBGPNodePeer struct { + // Name is the name of the BGP peer. This name is used to identify the BGP peer for the BGP instance. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // Supports IPv4 and IPv6 addresses. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + PeerAddress *string 
`json:"peerAddress,omitempty"` + + // PeerASN is the ASN of the peer BGP router. + // Supports extended 32bit ASNs + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + PeerASN *int64 `json:"peerASN,omitempty"` + + // LocalAddress is the IP address of the local interface to use for the peering session. + // This configuration is derived from CiliumBGPNodeConfigOverride resource. If not specified, the local address will be used for setting up peering. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + LocalAddress *string `json:"localAddress,omitempty"` + + // PeerConfigRef is a reference to a peer configuration resource. 
+ // If not specified, the default BGP configuration is used for this peer. + // + // +kubebuilder:validation:Optional + PeerConfigRef *PeerConfigReference `json:"peerConfigRef,omitempty"` +} + +// CiliumBGPNodeStatus is the status of the CiliumBGPNodeConfig. +type CiliumBGPNodeStatus struct { + // BGPInstances is the status of the BGP instances on the node. + // + // +kubebuilder:validation:Required + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeInstanceStatus `json:"bgpInstances"` +} + +type CiliumBGPNodeInstanceStatus struct { + // Name is the name of the BGP instance. This name is used to identify the BGP instance on the node. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // LocalASN is the ASN of this BGP instance. + // + // +kubebuilder:validation:Optional + LocalASN *int64 `json:"localASN,omitempty"` + + // PeerStatuses is the state of the BGP peers for this BGP instance. + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + PeerStatuses []CiliumBGPNodePeerStatus `json:"peers,omitempty"` +} + +// CiliumBGPNodePeerStatus is the status of a BGP peer. +type CiliumBGPNodePeerStatus struct { + // Name is the name of the BGP peer. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // + // +kubebuilder:validation:Required + PeerAddress string `json:"peerAddress"` + + // PeerASN is the ASN of the neighbor. + // + // +kubebuilder:validation:Optional + PeerASN *int64 `json:"peerASN,omitempty"` + + // PeeringState is last known state of the peering session. + // + // +kubebuilder:validation:Optional + PeeringState *string `json:"peeringState,omitempty"` + + // Timers is the state of the negotiated BGP timers for this peer. + // + // +kubebuilder:validation:Optional + Timers *CiliumBGPTimersState `json:"timers,omitempty"` + + // Uptime is the time since the last peering session was established. 
+ // + // +kubebuilder:validation:Optional + Uptime *string `json:"uptime,omitempty"` + + // RoutesReceived is the number of routes received from this peer. + // + // +kubebuilder:validation:Optional + RoutesReceived *int32 `json:"routesReceived,omitempty"` + + // RoutesAdvertised is the number of routes advertised to this peer. + // + // +kubebuilder:validation:Optional + RoutesAdvertised *int32 `json:"routesAdvertised,omitempty"` +} + +// CiliumBGPTimersState is the state of the negotiated BGP timers for a peer. +type CiliumBGPTimersState struct { + // AppliedHoldTimeSeconds is the negotiated hold time for this peer. + // + // +kubebuilder:validation:Optional + AppliedHoldTimeSeconds *int32 `json:"appliedHoldTimeSeconds,omitempty"` + + // AppliedKeepaliveSeconds is the negotiated keepalive time for this peer. + // + // +kubebuilder:validation:Optional + AppliedKeepaliveSeconds *int32 `json:"appliedKeepaliveSeconds,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go new file mode 100644 index 0000000000..8e1f7ecb3e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPPeerConfigList is a list of CiliumBGPPeer objects. +type CiliumBGPPeerConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPPeer. 
+ Items []CiliumBGPPeerConfig `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeerconfig",path="ciliumbgppeerconfigs",scope="Cluster",shortName={cbgppeer} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +type CiliumBGPPeerConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPPeerConfig. + Spec CiliumBGPPeerConfigSpec `json:"spec"` +} + +type CiliumBGPPeerConfigSpec struct { + // Transport defines the BGP transport parameters for the peer. + // + // If not specified, the default transport parameters are used. + // + // +kubebuilder:validation:Optional + Transport *CiliumBGPTransport `json:"transport,omitempty"` + + // Timers defines the BGP timers for the peer. + // + // If not specified, the default timers are used. + // + // +kubebuilder:validation:Optional + Timers *CiliumBGPTimers `json:"timers,omitempty"` + + // AuthSecretRef is the name of the secret to use to fetch a TCP + // authentication password for this peer. + // + // If not specified, no authentication is used. + // + // +kubebuilder:validation:Optional + AuthSecretRef *string `json:"authSecretRef,omitempty"` + + // GracefulRestart defines graceful restart parameters which are negotiated + // with this peer. + // + // If not specified, the graceful restart capability is disabled. + // + // +kubebuilder:validation:Optional + GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"` + + // EBGPMultihopTTL controls the multi-hop feature for eBGP peers. + // Its value defines the Time To Live (TTL) value used in BGP + // packets sent to the peer. 
+ //
+ // If not specified, EBGP multihop is disabled. This field is ignored for iBGP neighbors.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=255
+ // +kubebuilder:default=1
+ EBGPMultihop *int32 `json:"ebgpMultihop,omitempty"`
+
+ // Families, if provided, defines a set of AFI/SAFIs the speaker will
+ // negotiate with its peer.
+ //
+ // If not specified, the default families of IPv6/unicast and IPv4/unicast will be created.
+ //
+ // +kubebuilder:validation:Optional
+ Families []CiliumBGPFamilyWithAdverts `json:"families,omitempty"`
+}
+
+// CiliumBGPFamily represents an AFI/SAFI address family pair.
+type CiliumBGPFamily struct {
+ // Afi is the Address Family Identifier (AFI) of the family.
+ //
+ // +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque
+ // +kubebuilder:validation:Required
+ Afi string `json:"afi"`
+
+ // Safi is the Subsequent Address Family Identifier (SAFI) of the family.
+ //
+ // +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value
+ // +kubebuilder:validation:Required
+ Safi string `json:"safi"`
+}
+
+// CiliumBGPFamilyWithAdverts represents an AFI/SAFI address family pair along with reference to BGP Advertisements.
+type CiliumBGPFamilyWithAdverts struct {
+ CiliumBGPFamily `json:",inline"`
+
+ // Advertisements selects a group of BGP Advertisement(s) to advertise for this family.
+ //
+ // If not specified, no advertisements are sent for this family.
+ //
+ // This field is ignored in CiliumBGPNeighbor which is used in CiliumBGPPeeringPolicy.
+ // Use CiliumBGPPeeringPolicy advertisement options instead.
+ //
+ // +kubebuilder:validation:Optional
+ Advertisements *slimv1.LabelSelector `json:"advertisements,omitempty"`
+}
+
+// CiliumBGPTransport defines the BGP transport parameters for the peer. 
+type CiliumBGPTransport struct { + // LocalPort is the local port to be used for the BGP session. + // + // If not specified, defaults to TCP port 179. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=179 + LocalPort *int32 `json:"localPort,omitempty"` + + // PeerPort is the peer port to be used for the BGP session. + // + // If not specified, defaults to TCP port 179. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=179 + PeerPort *int32 `json:"peerPort,omitempty"` +} + +type CiliumBGPTimers struct { + // ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8). + // + // If not specified, defaults to 120 seconds. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=2147483647 + // +kubebuilder:default=120 + ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"` + + // HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2). + // Updating this value will cause a session reset. + // + // If not specified, defaults to 90 seconds. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=3 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=90 + HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"` + + // KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8). + // It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset. + // + // If not specified, defaults to 30 seconds. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=30 + KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go index b2fb7840fe..3558015228 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go @@ -27,6 +27,15 @@ const ( DefaultBGPKeepAliveTimeSeconds = 30 // DefaultBGPGRRestartTimeSeconds defines default Restart Time for graceful restart (RFC 4724, section 4.2) DefaultBGPGRRestartTimeSeconds = 120 + // BGPLoadBalancerClass defines the BGP Control Plane load balancer class for Services. + BGPLoadBalancerClass = "io.cilium/bgp-control-plane" + // PodCIDRSelectorName defines the name for a selector matching Pod CIDRs + // (standard cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs). + PodCIDRSelectorName = "PodCIDR" + // CiliumLoadBalancerIPPoolSelectorName defines the name for a selector matching CiliumLoadBalancerIPPool resources. + CiliumLoadBalancerIPPoolSelectorName = "CiliumLoadBalancerIPPool" + // CiliumPodIPPoolSelectorName defines the name for a selector matching CiliumPodIPPool resources. + CiliumPodIPPoolSelectorName = CPIPKindDefinition ) // +genclient @@ -99,6 +108,70 @@ type CiliumBGPNeighborGracefulRestart struct { RestartTimeSeconds *int32 `json:"restartTimeSeconds,omitempty"` } +// BGPStandardCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997) +// as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon. 
+// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` +type BGPStandardCommunity string + +// BGPLargeCommunity type represents a value of the BGP Large Communities Attribute (RFC 8092), +// as three 4-byte decimal numbers separated by colons. +// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$` +type BGPLargeCommunity string + +// BGPCommunities holds community values of the supported BGP community path attributes. +type BGPCommunities struct { + // Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values. + // + // +kubebuilder:validation:Optional + Standard []BGPStandardCommunity `json:"standard,omitempty"` + + // Large holds a list of the BGP Large Communities Attribute (RFC 8092) values. + // + // +kubebuilder:validation:Optional + Large []BGPLargeCommunity `json:"large,omitempty"` +} + +// CiliumBGPPathAttributes can be used to apply additional path attributes +// to matched routes when advertising them to a BGP peer. 
+type CiliumBGPPathAttributes struct { + // SelectorType defines the object type on which the Selector applies: + // - For "PodCIDR" the Selector matches k8s CiliumNode resources + // (path attributes apply to routes announced for PodCIDRs of selected CiliumNodes. + // Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs. + // - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources + // (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools). + // - For "CiliumPodIPPool" the Selector matches CiliumPodIPPool custom resources + // (path attributes apply to routes announced for allocated CIDRs of selected CiliumPodIPPools). + // + // +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool;CiliumPodIPPool + // +kubebuilder:validation:Required + SelectorType string `json:"selectorType"` + + // Selector selects a group of objects of the SelectorType + // resulting into routes that will be announced with the configured Attributes. + // If nil / not set, all objects of the SelectorType are selected. + // + // +kubebuilder:validation:Optional + Selector *slimv1.LabelSelector `json:"selector,omitempty"` + + // Communities defines a set of community values advertised in the supported BGP Communities path attributes. + // If nil / not set, no BGP Communities path attribute will be advertised. + // + // +kubebuilder:validation:Optional + Communities *BGPCommunities `json:"communities,omitempty"` + + // LocalPreference defines the preference value advertised in the BGP Local Preference path attribute. + // As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers + // (no Local Preference path attribute will be advertised). + // If nil / not set, the default Local Preference of 100 will be advertised in + // the Local Preference path attribute for iBGP peers. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=4294967295 + LocalPreference *int64 `json:"localPreference,omitempty"` +} + // CiliumBGPNeighbor is a neighboring peer for use in a // CiliumBGPVirtualRouter configuration. type CiliumBGPNeighbor struct { @@ -124,6 +197,10 @@ type CiliumBGPNeighbor struct { // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=4294967295 PeerASN int64 `json:"peerASN"` + // AuthSecretRef is the name of the secret to use to fetch a TCP + // authentication password for this peer. + // +kubebuilder:validation:Optional + AuthSecretRef *string `json:"authSecretRef,omitempty"` // EBGPMultihopTTL controls the multi-hop feature for eBGP peers. // Its value defines the Time To Live (TTL) value used in BGP packets sent to the neighbor. // The value 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed). @@ -162,6 +239,20 @@ type CiliumBGPNeighbor struct { // // +kubebuilder:validation:Optional GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"` + // Families, if provided, defines a set of AFI/SAFIs the speaker will + // negotiate with it's peer. + // + // If this slice is not provided the default families of IPv6 and IPv4 will + // be provided. + // + // +kubebuilder:validation:Optional + Families []CiliumBGPFamily `json:"families"` + // AdvertisedPathAttributes can be used to apply additional path attributes + // to selected routes when advertising them to the peer. + // If empty / nil, no additional path attributes are advertised. + // + // +kubebuilder:validation:Optional + AdvertisedPathAttributes []CiliumBGPPathAttributes `json:"advertisedPathAttributes,omitempty"` } // CiliumBGPVirtualRouter defines a discrete BGP virtual router configuration. 
@@ -179,8 +270,20 @@ type CiliumBGPVirtualRouter struct { // +kubebuilder:validation:Optional // +kubebuilder:default=false ExportPodCIDR *bool `json:"exportPodCIDR,omitempty"` + // PodIPPoolSelector selects CiliumPodIPPools based on labels. The virtual + // router will announce allocated CIDRs of matching CiliumPodIPPools. + // + // If empty / nil no CiliumPodIPPools will be announced. + // + // +kubebuilder:validation:Optional + PodIPPoolSelector *slimv1.LabelSelector `json:"podIPPoolSelector,omitempty"` // ServiceSelector selects a group of load balancer services which this - // virtual router will announce. + // virtual router will announce. The loadBalancerClass for a service must + // be nil or specify a class supported by Cilium, e.g. "io.cilium/bgp-control-plane". + // Refer to the following document for additional details regarding load balancer + // classes: + // + // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class // // If empty / nil no services will be announced. 
// @@ -237,6 +340,18 @@ func (n *CiliumBGPNeighbor) SetDefaults() { (n.GracefulRestart.RestartTimeSeconds == nil || *n.GracefulRestart.RestartTimeSeconds == 0) { n.GracefulRestart.RestartTimeSeconds = pointer.Int32(DefaultBGPGRRestartTimeSeconds) } + if len(n.Families) == 0 { + n.Families = []CiliumBGPFamily{ + { + Afi: "ipv4", + Safi: "unicast", + }, + { + Afi: "ipv6", + Safi: "unicast", + }, + } + } } // Validate validates CiliumBGPNeighbor's configuration constraints diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go index 7a7851cb15..7170260459 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go @@ -9,6 +9,9 @@ import ( slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" ) +// L2AnnounceLoadBalancerClass defines the L2 Announcer load balancer class for Services. +const L2AnnounceLoadBalancerClass = "io.cilium/l2-announcer" + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -64,7 +67,12 @@ type CiliumL2AnnouncementPolicySpec struct { // // +kubebuilder:validation:Optional NodeSelector *slimv1.LabelSelector `json:"nodeSelector"` - // ServiceSelector selects a set of services which will be announced over L2 networks + // ServiceSelector selects a set of services which will be announced over L2 networks. + // The loadBalancerClass for a service must be nil or specify a supported class, e.g. + // "io.cilium/l2-announcer". Refer to the following document for additional details + // regarding load balancer classes: + // + // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class // // If nil this policy applies to all services. 
// diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go index 7fc7ad95bd..95d5414f7a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go @@ -14,8 +14,8 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories={cilium},singular="ciliumloadbalancerippool",path="ciliumloadbalancerippools",scope="Cluster",shortName={ippools,ippool,lbippool,lbippools} // +kubebuilder:printcolumn:JSONPath=".spec.disabled",name="Disabled",type=boolean -// +kubebuilder:printcolumn:name="Conflicting",type=string,JSONPath=`.status.conditions[?(@.type=="io.cilium/conflict")].status` -// +kubebuilder:printcolumn:name="IPs Available",type=string,JSONPath=`.status.conditions[?(@.type=="io.cilium/ips-available")].message` +// +kubebuilder:printcolumn:name="Conflicting",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/PoolConflict")].status` +// +kubebuilder:printcolumn:name="IPs Available",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/IPsAvailable")].message` // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date // +kubebuilder:subresource:status // +kubebuilder:storageversion @@ -69,11 +69,22 @@ type CiliumLoadBalancerIPPoolSpec struct { // // +kubebuilder:validation:Optional ServiceSelector *slimv1.LabelSelector `json:"serviceSelector"` - // CiliumLoadBalancerIPPoolCIDRBlock is a list of CIDRs comprising this IP Pool + // AllowFirstLastIPs, if set to `yes` means that the first and last IPs of each CIDR will be allocatable. + // If `no` or undefined, these IPs will be reserved. This field is ignored for /{31,32} and /{127,128} CIDRs since + // reserving the first and last IPs would make the CIDRs unusable. 
// - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - Cidrs []CiliumLoadBalancerIPPoolCIDRBlock `json:"cidrs"` + // +kubebuilder:validation:Optional + AllowFirstLastIPs AllowFirstLastIPType `json:"allowFirstLastIPs,omitempty"` + // Cidrs is a list of CIDRs comprising this IP Pool + // Deprecated: please use the `blocks` field instead. This field will be removed in a future release. + // https://github.com/cilium/cilium/issues/28590 + // + // +kubebuilder:validation:Optional + Cidrs []CiliumLoadBalancerIPPoolIPBlock `json:"cidrs,omitempty"` + // Blocks is a list of CIDRs comprising this IP Pool + // + // +kubebuilder:validation:Optional + Blocks []CiliumLoadBalancerIPPoolIPBlock `json:"blocks,omitempty"` // Disabled, if set to true means that no new IPs will be allocated from this pool. // Existing allocations will not be removed from services. // @@ -82,11 +93,23 @@ type CiliumLoadBalancerIPPoolSpec struct { Disabled bool `json:"disabled"` } -// CiliumLoadBalancerIPPoolCIDRBlock describes a single CIDR block. -type CiliumLoadBalancerIPPoolCIDRBlock struct { +// +kubebuilder:validation:Enum=Yes;No +type AllowFirstLastIPType string + +const ( + AllowFirstLastIPNo AllowFirstLastIPType = "No" + AllowFirstLastIPYes AllowFirstLastIPType = "Yes" +) + +// CiliumLoadBalancerIPPoolIPBlock describes a single IP block. 
+type CiliumLoadBalancerIPPoolIPBlock struct { // +kubebuilder:validation:Format=cidr - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Cidr IPv4orIPv6CIDR `json:"cidr"` + // +kubebuilder:validation:Optional + Start string `json:"start,omitempty"` + // +kubebuilder:validation:Optional + Stop string `json:"stop,omitempty"` } // +deepequal-gen=false diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go index 32bb859573..54d7f9014c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go @@ -40,6 +40,31 @@ const ( // BGPPName is the full name of Cilium BGP Peering Policy BGPPName = BGPPPluralName + "." + CustomResourceDefinitionGroup + // BGPClusterConfig (BGPCC) + BGPCCPluralName = "ciliumbgpclusterconfigs" + BGPCCKindDefinition = "CiliumBGPClusterConfig" + BGPCCName = BGPCCPluralName + "." + CustomResourceDefinitionGroup + + // BGPPeerConfig (BGPPC) + BGPPCPluralName = "ciliumbgppeerconfigs" + BGPPCKindDefinition = "CiliumBGPPeerConfig" + BGPPCName = BGPPCPluralName + "." + CustomResourceDefinitionGroup + + // BGPAdvertisement (BGPA) + BGPAPluralName = "ciliumbgpadvertisements" + BGPAKindDefinition = "CiliumBGPAdvertisement" + BGPAName = BGPAPluralName + "." + CustomResourceDefinitionGroup + + // BGPNodeConfig (BGPNC) + BGPNCPluralName = "ciliumbgpnodeconfigs" + BGPNCKindDefinition = "CiliumBGPNodeConfig" + BGPNCName = BGPNCPluralName + "." + CustomResourceDefinitionGroup + + // BGPNodeConfigOverride (BGPNCO) + BGPNCOPluralName = "ciliumbgpnodeconfigoverrides" + BGPNCOKindDefinition = "CiliumBGPNodeConfigOverride" + BGPNCOName = BGPNCOPluralName + "." 
+ CustomResourceDefinitionGroup + // Cilium Load Balancer IP Pool (IPPool) // PoolPluralName is the plural name of Cilium Load Balancer IP Pool @@ -136,6 +161,18 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CiliumL2AnnouncementPolicyList{}, &CiliumPodIPPool{}, &CiliumPodIPPoolList{}, + + // new BGP types + &CiliumBGPClusterConfig{}, + &CiliumBGPClusterConfigList{}, + &CiliumBGPPeerConfig{}, + &CiliumBGPPeerConfigList{}, + &CiliumBGPAdvertisement{}, + &CiliumBGPAdvertisementList{}, + &CiliumBGPNodeConfig{}, + &CiliumBGPNodeConfigList{}, + &CiliumBGPNodeConfigOverride{}, + &CiliumBGPNodeConfigOverrideList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go index 813cf6a56d..d8ce33de8e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go @@ -17,69 +17,968 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Advertisement) DeepCopyInto(out *Advertisement) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = new(CiliumBGPAttributes) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Advertisement. +func (in *Advertisement) DeepCopy() *Advertisement { + if in == nil { + return nil + } + out := new(Advertisement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BGPCommunities) DeepCopyInto(out *BGPCommunities) { + *out = *in + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]BGPStandardCommunity, len(*in)) + copy(*out, *in) + } + if in.Large != nil { + in, out := &in.Large, &out.Large + *out = make([]BGPLargeCommunity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPCommunities. +func (in *BGPCommunities) DeepCopy() *BGPCommunities { + if in == nil { + return nil + } + out := new(BGPCommunities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPAdvertisement) DeepCopyInto(out *CiliumBGPAdvertisement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisement. +func (in *CiliumBGPAdvertisement) DeepCopy() *CiliumBGPAdvertisement { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPAdvertisement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPAdvertisementList) DeepCopyInto(out *CiliumBGPAdvertisementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPAdvertisement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementList. +func (in *CiliumBGPAdvertisementList) DeepCopy() *CiliumBGPAdvertisementList { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPAdvertisementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPAdvertisementSpec) DeepCopyInto(out *CiliumBGPAdvertisementSpec) { + *out = *in + if in.Advertisements != nil { + in, out := &in.Advertisements, &out.Advertisements + *out = make([]Advertisement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementSpec. +func (in *CiliumBGPAdvertisementSpec) DeepCopy() *CiliumBGPAdvertisementSpec { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPAttributes) DeepCopyInto(out *CiliumBGPAttributes) { + *out = *in + if in.Community != nil { + in, out := &in.Community, &out.Community + *out = new(BGPCommunities) + (*in).DeepCopyInto(*out) + } + if in.LocalPreference != nil { + in, out := &in.LocalPreference, &out.LocalPreference + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAttributes. +func (in *CiliumBGPAttributes) DeepCopy() *CiliumBGPAttributes { + if in == nil { + return nil + } + out := new(CiliumBGPAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfig) DeepCopyInto(out *CiliumBGPClusterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfig. +func (in *CiliumBGPClusterConfig) DeepCopy() *CiliumBGPClusterConfig { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPClusterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPClusterConfigList) DeepCopyInto(out *CiliumBGPClusterConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPClusterConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigList. +func (in *CiliumBGPClusterConfigList) DeepCopy() *CiliumBGPClusterConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPClusterConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfigSpec) DeepCopyInto(out *CiliumBGPClusterConfigSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigSpec. +func (in *CiliumBGPClusterConfigSpec) DeepCopy() *CiliumBGPClusterConfigSpec { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamily. +func (in *CiliumBGPFamily) DeepCopy() *CiliumBGPFamily { + if in == nil { + return nil + } + out := new(CiliumBGPFamily) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPFamilyWithAdverts) DeepCopyInto(out *CiliumBGPFamilyWithAdverts) { + *out = *in + out.CiliumBGPFamily = in.CiliumBGPFamily + if in.Advertisements != nil { + in, out := &in.Advertisements, &out.Advertisements + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamilyWithAdverts. +func (in *CiliumBGPFamilyWithAdverts) DeepCopy() *CiliumBGPFamilyWithAdverts { + if in == nil { + return nil + } + out := new(CiliumBGPFamilyWithAdverts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPInstance) DeepCopyInto(out *CiliumBGPInstance) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPInstance. +func (in *CiliumBGPInstance) DeepCopy() *CiliumBGPInstance { + if in == nil { + return nil + } + out := new(CiliumBGPInstance) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CiliumBGPNeighbor) DeepCopyInto(out *CiliumBGPNeighbor) { *out = *in - if in.PeerPort != nil { - in, out := &in.PeerPort, &out.PeerPort - *out = new(int32) + if in.PeerPort != nil { + in, out := &in.PeerPort, &out.PeerPort + *out = new(int32) + **out = **in + } + if in.AuthSecretRef != nil { + in, out := &in.AuthSecretRef, &out.AuthSecretRef + *out = new(string) + **out = **in + } + if in.EBGPMultihopTTL != nil { + in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL + *out = new(int32) + **out = **in + } + if in.ConnectRetryTimeSeconds != nil { + in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds + *out = new(int32) + **out = **in + } + if in.HoldTimeSeconds != nil { + in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.KeepAliveTimeSeconds != nil { + in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds + *out = new(int32) + **out = **in + } + if in.GracefulRestart != nil { + in, out := &in.GracefulRestart, &out.GracefulRestart + *out = new(CiliumBGPNeighborGracefulRestart) + (*in).DeepCopyInto(*out) + } + if in.Families != nil { + in, out := &in.Families, &out.Families + *out = make([]CiliumBGPFamily, len(*in)) + copy(*out, *in) + } + if in.AdvertisedPathAttributes != nil { + in, out := &in.AdvertisedPathAttributes, &out.AdvertisedPathAttributes + *out = make([]CiliumBGPPathAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor. +func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor { + if in == nil { + return nil + } + out := new(CiliumBGPNeighbor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) { + *out = *in + if in.RestartTimeSeconds != nil { + in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart. +func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart { + if in == nil { + return nil + } + out := new(CiliumBGPNeighborGracefulRestart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfig) DeepCopyInto(out *CiliumBGPNodeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfig. +func (in *CiliumBGPNodeConfig) DeepCopy() *CiliumBGPNodeConfig { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopyInto(out *CiliumBGPNodeConfigInstanceOverride) { + *out = *in + if in.RouterID != nil { + in, out := &in.RouterID, &out.RouterID + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPNodeConfigPeerOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigInstanceOverride. +func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopy() *CiliumBGPNodeConfigInstanceOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigInstanceOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigList) DeepCopyInto(out *CiliumBGPNodeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPNodeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigList. +func (in *CiliumBGPNodeConfigList) DeepCopy() *CiliumBGPNodeConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CiliumBGPNodeConfigOverride) DeepCopyInto(out *CiliumBGPNodeConfigOverride) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverride. +func (in *CiliumBGPNodeConfigOverride) DeepCopy() *CiliumBGPNodeConfigOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigOverride) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopyInto(out *CiliumBGPNodeConfigOverrideList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPNodeConfigOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideList. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopy() *CiliumBGPNodeConfigOverrideList { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverrideList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopyInto(out *CiliumBGPNodeConfigOverrideSpec) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeConfigInstanceOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideSpec. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopy() *CiliumBGPNodeConfigOverrideSpec { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverrideSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigPeerOverride) DeepCopyInto(out *CiliumBGPNodeConfigPeerOverride) { + *out = *in + if in.LocalAddress != nil { + in, out := &in.LocalAddress, &out.LocalAddress + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigPeerOverride. +func (in *CiliumBGPNodeConfigPeerOverride) DeepCopy() *CiliumBGPNodeConfigPeerOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigPeerOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodeInstance) DeepCopyInto(out *CiliumBGPNodeInstance) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.RouterID != nil { + in, out := &in.RouterID, &out.RouterID + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPNodePeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstance. +func (in *CiliumBGPNodeInstance) DeepCopy() *CiliumBGPNodeInstance { + if in == nil { + return nil + } + out := new(CiliumBGPNodeInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeInstanceStatus) DeepCopyInto(out *CiliumBGPNodeInstanceStatus) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.PeerStatuses != nil { + in, out := &in.PeerStatuses, &out.PeerStatuses + *out = make([]CiliumBGPNodePeerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstanceStatus. +func (in *CiliumBGPNodeInstanceStatus) DeepCopy() *CiliumBGPNodeInstanceStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodeInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodePeer) DeepCopyInto(out *CiliumBGPNodePeer) { + *out = *in + if in.PeerAddress != nil { + in, out := &in.PeerAddress, &out.PeerAddress + *out = new(string) + **out = **in + } + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) + **out = **in + } + if in.LocalAddress != nil { + in, out := &in.LocalAddress, &out.LocalAddress + *out = new(string) + **out = **in + } + if in.PeerConfigRef != nil { + in, out := &in.PeerConfigRef, &out.PeerConfigRef + *out = new(PeerConfigReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeer. +func (in *CiliumBGPNodePeer) DeepCopy() *CiliumBGPNodePeer { + if in == nil { + return nil + } + out := new(CiliumBGPNodePeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodePeerStatus) DeepCopyInto(out *CiliumBGPNodePeerStatus) { + *out = *in + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) **out = **in } - if in.EBGPMultihopTTL != nil { - in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL - *out = new(int32) + if in.PeeringState != nil { + in, out := &in.PeeringState, &out.PeeringState + *out = new(string) **out = **in } - if in.ConnectRetryTimeSeconds != nil { - in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds - *out = new(int32) + if in.Timers != nil { + in, out := &in.Timers, &out.Timers + *out = new(CiliumBGPTimersState) + (*in).DeepCopyInto(*out) + } + if in.Uptime != nil { + in, out := &in.Uptime, &out.Uptime + *out = new(string) **out = **in } - if in.HoldTimeSeconds != nil { - in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds + if in.RoutesReceived != nil { + in, out := &in.RoutesReceived, &out.RoutesReceived *out = new(int32) **out = **in } - if in.KeepAliveTimeSeconds != nil { - in, out := 
&in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds + if in.RoutesAdvertised != nil { + in, out := &in.RoutesAdvertised, &out.RoutesAdvertised *out = new(int32) **out = **in } - if in.GracefulRestart != nil { - in, out := &in.GracefulRestart, &out.GracefulRestart - *out = new(CiliumBGPNeighborGracefulRestart) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeerStatus. +func (in *CiliumBGPNodePeerStatus) DeepCopy() *CiliumBGPNodePeerStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodePeerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeSpec) DeepCopyInto(out *CiliumBGPNodeSpec) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeSpec. +func (in *CiliumBGPNodeSpec) DeepCopy() *CiliumBGPNodeSpec { + if in == nil { + return nil + } + out := new(CiliumBGPNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeStatus) DeepCopyInto(out *CiliumBGPNodeStatus) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeInstanceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeStatus. 
+func (in *CiliumBGPNodeStatus) DeepCopy() *CiliumBGPNodeStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPathAttributes) DeepCopyInto(out *CiliumBGPPathAttributes) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Communities != nil { + in, out := &in.Communities, &out.Communities + *out = new(BGPCommunities) (*in).DeepCopyInto(*out) } + if in.LocalPreference != nil { + in, out := &in.LocalPreference, &out.LocalPreference + *out = new(int64) + **out = **in + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor. -func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPathAttributes. +func (in *CiliumBGPPathAttributes) DeepCopy() *CiliumBGPPathAttributes { if in == nil { return nil } - out := new(CiliumBGPNeighbor) + out := new(CiliumBGPPathAttributes) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) { +func (in *CiliumBGPPeer) DeepCopyInto(out *CiliumBGPPeer) { *out = *in - if in.RestartTimeSeconds != nil { - in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds + if in.PeerAddress != nil { + in, out := &in.PeerAddress, &out.PeerAddress + *out = new(string) + **out = **in + } + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) + **out = **in + } + if in.PeerConfigRef != nil { + in, out := &in.PeerConfigRef, &out.PeerConfigRef + *out = new(PeerConfigReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeer. +func (in *CiliumBGPPeer) DeepCopy() *CiliumBGPPeer { + if in == nil { + return nil + } + out := new(CiliumBGPPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPeerConfig) DeepCopyInto(out *CiliumBGPPeerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfig. +func (in *CiliumBGPPeerConfig) DeepCopy() *CiliumBGPPeerConfig { + if in == nil { + return nil + } + out := new(CiliumBGPPeerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPPeerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPPeerConfigList) DeepCopyInto(out *CiliumBGPPeerConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPPeerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigList. +func (in *CiliumBGPPeerConfigList) DeepCopy() *CiliumBGPPeerConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPPeerConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPPeerConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPPeerConfigSpec) DeepCopyInto(out *CiliumBGPPeerConfigSpec) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(CiliumBGPTransport) + (*in).DeepCopyInto(*out) + } + if in.Timers != nil { + in, out := &in.Timers, &out.Timers + *out = new(CiliumBGPTimers) + (*in).DeepCopyInto(*out) + } + if in.AuthSecretRef != nil { + in, out := &in.AuthSecretRef, &out.AuthSecretRef + *out = new(string) + **out = **in + } + if in.GracefulRestart != nil { + in, out := &in.GracefulRestart, &out.GracefulRestart + *out = new(CiliumBGPNeighborGracefulRestart) + (*in).DeepCopyInto(*out) + } + if in.EBGPMultihop != nil { + in, out := &in.EBGPMultihop, &out.EBGPMultihop *out = new(int32) **out = **in } + if in.Families != nil { + in, out := &in.Families, &out.Families + *out = make([]CiliumBGPFamilyWithAdverts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart. -func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigSpec. +func (in *CiliumBGPPeerConfigSpec) DeepCopy() *CiliumBGPPeerConfigSpec { if in == nil { return nil } - out := new(CiliumBGPNeighborGracefulRestart) + out := new(CiliumBGPPeerConfigSpec) in.DeepCopyInto(out) return out } @@ -172,6 +1071,89 @@ func (in *CiliumBGPPeeringPolicySpec) DeepCopy() *CiliumBGPPeeringPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPTimers) DeepCopyInto(out *CiliumBGPTimers) { + *out = *in + if in.ConnectRetryTimeSeconds != nil { + in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds + *out = new(int32) + **out = **in + } + if in.HoldTimeSeconds != nil { + in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.KeepAliveTimeSeconds != nil { + in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimers. +func (in *CiliumBGPTimers) DeepCopy() *CiliumBGPTimers { + if in == nil { + return nil + } + out := new(CiliumBGPTimers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPTimersState) DeepCopyInto(out *CiliumBGPTimersState) { + *out = *in + if in.AppliedHoldTimeSeconds != nil { + in, out := &in.AppliedHoldTimeSeconds, &out.AppliedHoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.AppliedKeepaliveSeconds != nil { + in, out := &in.AppliedKeepaliveSeconds, &out.AppliedKeepaliveSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimersState. +func (in *CiliumBGPTimersState) DeepCopy() *CiliumBGPTimersState { + if in == nil { + return nil + } + out := new(CiliumBGPTimersState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPTransport) DeepCopyInto(out *CiliumBGPTransport) { + *out = *in + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.PeerPort != nil { + in, out := &in.PeerPort, &out.PeerPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTransport. +func (in *CiliumBGPTransport) DeepCopy() *CiliumBGPTransport { + if in == nil { + return nil + } + out := new(CiliumBGPTransport) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) { *out = *in @@ -180,6 +1162,11 @@ func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) { *out = new(bool) **out = **in } + if in.PodIPPoolSelector != nil { + in, out := &in.PodIPPoolSelector, &out.PodIPPoolSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.ServiceSelector != nil { in, out := &in.ServiceSelector, &out.ServiceSelector *out = new(v1.LabelSelector) @@ -496,17 +1483,17 @@ func (in *CiliumLoadBalancerIPPool) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolCIDRBlock) { +func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolIPBlock) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolCIDRBlock. -func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopy() *CiliumLoadBalancerIPPoolCIDRBlock { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolIPBlock. 
+func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopy() *CiliumLoadBalancerIPPoolIPBlock { if in == nil { return nil } - out := new(CiliumLoadBalancerIPPoolCIDRBlock) + out := new(CiliumLoadBalancerIPPoolIPBlock) in.DeepCopyInto(out) return out } @@ -554,7 +1541,12 @@ func (in *CiliumLoadBalancerIPPoolSpec) DeepCopyInto(out *CiliumLoadBalancerIPPo } if in.Cidrs != nil { in, out := &in.Cidrs, &out.Cidrs - *out = make([]CiliumLoadBalancerIPPoolCIDRBlock, len(*in)) + *out = make([]CiliumLoadBalancerIPPoolIPBlock, len(*in)) + copy(*out, *in) + } + if in.Blocks != nil { + in, out := &in.Blocks, &out.Blocks + *out = make([]CiliumLoadBalancerIPPoolIPBlock, len(*in)) copy(*out, *in) } return @@ -867,3 +1859,19 @@ func (in *IPv6PoolSpec) DeepCopy() *IPv6PoolSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerConfigReference) DeepCopyInto(out *PeerConfigReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerConfigReference. +func (in *PeerConfigReference) DeepCopy() *PeerConfigReference { + if in == nil { + return nil + } + out := new(PeerConfigReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go index 7908b87c12..a64995c6b9 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go @@ -8,6 +8,272 @@ package v2alpha1 +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *Advertisement) DeepEqual(other *Advertisement) bool { + if other == nil { + return false + } + + if in.AdvertisementType != other.AdvertisementType { + return false + } + if (in.Selector == nil) != (other.Selector == nil) { + return false + } else if in.Selector != nil { + if !in.Selector.DeepEqual(other.Selector) { + return false + } + } + + if (in.Attributes == nil) != (other.Attributes == nil) { + return false + } else if in.Attributes != nil { + if !in.Attributes.DeepEqual(other.Attributes) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BGPCommunities) DeepEqual(other *BGPCommunities) bool { + if other == nil { + return false + } + + if ((in.Standard != nil) && (other.Standard != nil)) || ((in.Standard == nil) != (other.Standard == nil)) { + in, other := &in.Standard, &other.Standard + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if ((in.Large != nil) && (other.Large != nil)) || ((in.Large == nil) != (other.Large == nil)) { + in, other := &in.Large, &other.Large + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPAdvertisement) DeepEqual(other *CiliumBGPAdvertisement) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPAdvertisementSpec) DeepEqual(other *CiliumBGPAdvertisementSpec) bool { + if other == nil { + return false + } + + if ((in.Advertisements != nil) && (other.Advertisements != nil)) || ((in.Advertisements == nil) != (other.Advertisements == nil)) { + in, other := &in.Advertisements, &other.Advertisements + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPAttributes) DeepEqual(other *CiliumBGPAttributes) bool { + if other == nil { + return false + } + + if (in.Community == nil) != (other.Community == nil) { + return false + } else if in.Community != nil { + if !in.Community.DeepEqual(other.Community) { + return false + } + } + + if (in.LocalPreference == nil) != (other.LocalPreference == nil) { + return false + } else if in.LocalPreference != nil { + if *in.LocalPreference != *other.LocalPreference { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPClusterConfig) DeepEqual(other *CiliumBGPClusterConfig) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPClusterConfigSpec) DeepEqual(other *CiliumBGPClusterConfigSpec) bool { + if other == nil { + return false + } + + if (in.NodeSelector == nil) != (other.NodeSelector == nil) { + return false + } else if in.NodeSelector != nil { + if !in.NodeSelector.DeepEqual(other.NodeSelector) { + return false + } + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool { + if other == nil { + return false + } + + if in.Afi != other.Afi { + return false + } + if in.Safi != other.Safi { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPFamilyWithAdverts) DeepEqual(other *CiliumBGPFamilyWithAdverts) bool { + if other == nil { + return false + } + + if in.CiliumBGPFamily != other.CiliumBGPFamily { + return false + } + + if (in.Advertisements == nil) != (other.Advertisements == nil) { + return false + } else if in.Advertisements != nil { + if !in.Advertisements.DeepEqual(other.Advertisements) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPInstance) DeepEqual(other *CiliumBGPInstance) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { @@ -15,56 +281,585 @@ func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { return false } - if in.PeerAddress != other.PeerAddress { - return false + if in.PeerAddress != other.PeerAddress { + return false + } + if (in.PeerPort == nil) != (other.PeerPort == nil) { + return false + } else if in.PeerPort != nil { + if *in.PeerPort != *other.PeerPort { + return false + } + } + + if in.PeerASN != other.PeerASN { + return false + } + if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) { + return false + } else if in.AuthSecretRef != nil { + if *in.AuthSecretRef != *other.AuthSecretRef { + return false + } + } + + if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) { + return false + } else if in.EBGPMultihopTTL != nil { + if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL { + return false + } + } + + if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + return false + } else if in.ConnectRetryTimeSeconds != nil { + if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + return false + } + } + + if (in.HoldTimeSeconds == 
nil) != (other.HoldTimeSeconds == nil) { + return false + } else if in.HoldTimeSeconds != nil { + if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + return false + } + } + + if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + return false + } else if in.KeepAliveTimeSeconds != nil { + if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + return false + } + } + + if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + return false + } else if in.GracefulRestart != nil { + if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + return false + } + } + + if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) { + in, other := &in.Families, &other.Families + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.AdvertisedPathAttributes != nil) && (other.AdvertisedPathAttributes != nil)) || ((in.AdvertisedPathAttributes == nil) != (other.AdvertisedPathAttributes == nil)) { + in, other := &in.AdvertisedPathAttributes, &other.AdvertisedPathAttributes + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool { + if other == nil { + return false + } + + if in.Enabled != other.Enabled { + return false + } + if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) { + return false + } else if in.RestartTimeSeconds != nil { + if *in.RestartTimeSeconds != *other.RestartTimeSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfig) DeepEqual(other *CiliumBGPNodeConfig) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + if !in.Status.DeepEqual(&other.Status) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigInstanceOverride) DeepEqual(other *CiliumBGPNodeConfigInstanceOverride) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.RouterID == nil) != (other.RouterID == nil) { + return false + } else if in.RouterID != nil { + if *in.RouterID != *other.RouterID { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeConfigOverride) DeepEqual(other *CiliumBGPNodeConfigOverride) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepEqual(other *CiliumBGPNodeConfigOverrideSpec) bool { + if other == nil { + return false + } + + if in.NodeRef != other.NodeRef { + return false + } + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigPeerOverride) DeepEqual(other *CiliumBGPNodeConfigPeerOverride) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalAddress == nil) != (other.LocalAddress == nil) { + return false + } else if in.LocalAddress != nil { + if *in.LocalAddress != *other.LocalAddress { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeInstance) DeepEqual(other *CiliumBGPNodeInstance) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if (in.RouterID == nil) != (other.RouterID == nil) { + return false + } else if in.RouterID != nil { + if *in.RouterID != *other.RouterID { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeInstanceStatus) DeepEqual(other *CiliumBGPNodeInstanceStatus) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if ((in.PeerStatuses != nil) && (other.PeerStatuses != nil)) || ((in.PeerStatuses == nil) != (other.PeerStatuses == nil)) { + in, other := &in.PeerStatuses, &other.PeerStatuses + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodePeer) DeepEqual(other *CiliumBGPNodePeer) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.PeerAddress == nil) != (other.PeerAddress == nil) { + return false + } else if in.PeerAddress != nil { + if *in.PeerAddress != *other.PeerAddress { + return false + } + } + + if (in.PeerASN == nil) != (other.PeerASN == nil) { + return false + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { + return false + } + } + + if (in.LocalAddress == nil) != (other.LocalAddress == nil) { + return false + } else if in.LocalAddress != nil { + if *in.LocalAddress != *other.LocalAddress { + return false + } + } + + if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) { + return false + } else if in.PeerConfigRef != nil { + if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodePeerStatus) DeepEqual(other *CiliumBGPNodePeerStatus) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if in.PeerAddress != other.PeerAddress { + return false + } + if (in.PeerASN == nil) != (other.PeerASN == nil) { + return false + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { + return false + } + } + + if (in.PeeringState == nil) != (other.PeeringState == nil) { + return false + } else if in.PeeringState != nil { + if *in.PeeringState != *other.PeeringState { + return false + } + } + + if (in.Timers == nil) != (other.Timers == nil) { + return false + } else if in.Timers != nil { + if !in.Timers.DeepEqual(other.Timers) { + return false + } + } + + if (in.Uptime == nil) != (other.Uptime == nil) { + return false + } else if in.Uptime != nil { + if *in.Uptime != *other.Uptime { + return false + } + } + + if (in.RoutesReceived == nil) != (other.RoutesReceived == nil) { + return false + } else if in.RoutesReceived != nil { + if *in.RoutesReceived != *other.RoutesReceived { + return false + } + } + + if (in.RoutesAdvertised == nil) != (other.RoutesAdvertised == nil) { + return false + } else if in.RoutesAdvertised != nil { + if *in.RoutesAdvertised != *other.RoutesAdvertised { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeSpec) DeepEqual(other *CiliumBGPNodeSpec) bool { + if other == nil { + return false + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } } - if (in.PeerPort == nil) != (other.PeerPort == nil) { + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeStatus) DeepEqual(other *CiliumBGPNodeStatus) bool { + if other == nil { return false - } else if in.PeerPort != nil { - if *in.PeerPort != *other.PeerPort { + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } } } - if in.PeerASN != other.PeerASN { + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPPathAttributes) DeepEqual(other *CiliumBGPPathAttributes) bool { + if other == nil { return false } - if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) { + + if in.SelectorType != other.SelectorType { return false - } else if in.EBGPMultihopTTL != nil { - if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL { + } + if (in.Selector == nil) != (other.Selector == nil) { + return false + } else if in.Selector != nil { + if !in.Selector.DeepEqual(other.Selector) { return false } } - if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + if (in.Communities == nil) != (other.Communities == nil) { return false - } else if in.ConnectRetryTimeSeconds != nil { - if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + } else if in.Communities != nil { + if !in.Communities.DeepEqual(other.Communities) { return false } } - if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) { + if (in.LocalPreference == nil) != (other.LocalPreference == nil) { return false - } else if in.HoldTimeSeconds != nil { - if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + } else if in.LocalPreference != nil { + if *in.LocalPreference != *other.LocalPreference { return false } } - if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPPeer) DeepEqual(other *CiliumBGPPeer) bool { + if other == nil { return false - } else if in.KeepAliveTimeSeconds != nil { - if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + } + + if in.Name != other.Name { + return false + } + if (in.PeerAddress == nil) != (other.PeerAddress == nil) { + return false + } else if in.PeerAddress != nil { + if *in.PeerAddress != *other.PeerAddress { return false } } - if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + if (in.PeerASN == nil) != (other.PeerASN == nil) { return false - } else if in.GracefulRestart != nil { - if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { + return false + } + } + + if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) { + return false + } else if in.PeerConfigRef != nil { + if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) { return false } } @@ -74,19 +869,79 @@ func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. -func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool { +func (in *CiliumBGPPeerConfig) DeepEqual(other *CiliumBGPPeerConfig) bool { if other == nil { return false } - if in.Enabled != other.Enabled { + if !in.Spec.DeepEqual(&other.Spec) { return false } - if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) { + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPPeerConfigSpec) DeepEqual(other *CiliumBGPPeerConfigSpec) bool { + if other == nil { return false - } else if in.RestartTimeSeconds != nil { - if *in.RestartTimeSeconds != *other.RestartTimeSeconds { + } + + if (in.Transport == nil) != (other.Transport == nil) { + return false + } else if in.Transport != nil { + if !in.Transport.DeepEqual(other.Transport) { + return false + } + } + + if (in.Timers == nil) != (other.Timers == nil) { + return false + } else if in.Timers != nil { + if !in.Timers.DeepEqual(other.Timers) { + return false + } + } + + if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) { + return false + } else if in.AuthSecretRef != nil { + if *in.AuthSecretRef != *other.AuthSecretRef { + return false + } + } + + if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + return false + } else if in.GracefulRestart != nil { + if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + return false + } + } + + if (in.EBGPMultihop == nil) != (other.EBGPMultihop == nil) { + return false + } else if in.EBGPMultihop != nil { + if *in.EBGPMultihop != *other.EBGPMultihop { + return false + } + } + + if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) { + in, other := &in.Families, &other.Families + if other == nil { + return false + } + + if len(*in) != len(*other) { return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } } } @@ -142,6 +997,92 @@ func (in *CiliumBGPPeeringPolicySpec) DeepEqual(other *CiliumBGPPeeringPolicySpe return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPTimers) DeepEqual(other *CiliumBGPTimers) bool { + if other == nil { + return false + } + + if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + return false + } else if in.ConnectRetryTimeSeconds != nil { + if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + return false + } + } + + if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) { + return false + } else if in.HoldTimeSeconds != nil { + if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + return false + } + } + + if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + return false + } else if in.KeepAliveTimeSeconds != nil { + if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPTimersState) DeepEqual(other *CiliumBGPTimersState) bool { + if other == nil { + return false + } + + if (in.AppliedHoldTimeSeconds == nil) != (other.AppliedHoldTimeSeconds == nil) { + return false + } else if in.AppliedHoldTimeSeconds != nil { + if *in.AppliedHoldTimeSeconds != *other.AppliedHoldTimeSeconds { + return false + } + } + + if (in.AppliedKeepaliveSeconds == nil) != (other.AppliedKeepaliveSeconds == nil) { + return false + } else if in.AppliedKeepaliveSeconds != nil { + if *in.AppliedKeepaliveSeconds != *other.AppliedKeepaliveSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPTransport) DeepEqual(other *CiliumBGPTransport) bool { + if other == nil { + return false + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if (in.PeerPort == nil) != (other.PeerPort == nil) { + return false + } else if in.PeerPort != nil { + if *in.PeerPort != *other.PeerPort { + return false + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool { @@ -160,6 +1101,14 @@ func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool } } + if (in.PodIPPoolSelector == nil) != (other.PodIPPoolSelector == nil) { + return false + } else if in.PodIPPoolSelector != nil { + if !in.PodIPPoolSelector.DeepEqual(other.PodIPPoolSelector) { + return false + } + } + if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) { return false } else if in.ServiceSelector != nil { @@ -324,7 +1273,7 @@ func (in *CiliumLoadBalancerIPPool) DeepEqual(other *CiliumLoadBalancerIPPool) b // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
-func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepEqual(other *CiliumLoadBalancerIPPoolCIDRBlock) bool { +func (in *CiliumLoadBalancerIPPoolIPBlock) DeepEqual(other *CiliumLoadBalancerIPPoolIPBlock) bool { if other == nil { return false } @@ -332,6 +1281,12 @@ func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepEqual(other *CiliumLoadBalancer if in.Cidr != other.Cidr { return false } + if in.Start != other.Start { + return false + } + if in.Stop != other.Stop { + return false + } return true } @@ -351,6 +1306,9 @@ func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoo } } + if in.AllowFirstLastIPs != other.AllowFirstLastIPs { + return false + } if ((in.Cidrs != nil) && (other.Cidrs != nil)) || ((in.Cidrs == nil) != (other.Cidrs == nil)) { in, other := &in.Cidrs, &other.Cidrs if other == nil { @@ -368,6 +1326,23 @@ func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoo } } + if ((in.Blocks != nil) && (other.Blocks != nil)) || ((in.Blocks == nil) != (other.Blocks == nil)) { + in, other := &in.Blocks, &other.Blocks + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + if in.Disabled != other.Disabled { return false } @@ -537,3 +1512,23 @@ func (in *IPv6PoolSpec) DeepEqual(other *IPv6PoolSpec) bool { return true } + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *PeerConfigReference) DeepEqual(other *PeerConfigReference) bool { + if other == nil { + return false + } + + if in.Group != other.Group { + return false + } + if in.Kind != other.Kind { + return false + } + if in.Name != other.Name { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go index 0e56f45914..a217d05b8e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go @@ -15,6 +15,11 @@ import ( type CiliumV2alpha1Interface interface { RESTClient() rest.Interface + CiliumBGPAdvertisementsGetter + CiliumBGPClusterConfigsGetter + CiliumBGPNodeConfigsGetter + CiliumBGPNodeConfigOverridesGetter + CiliumBGPPeerConfigsGetter CiliumBGPPeeringPoliciesGetter CiliumCIDRGroupsGetter CiliumEndpointSlicesGetter @@ -29,6 +34,26 @@ type CiliumV2alpha1Client struct { restClient rest.Interface } +func (c *CiliumV2alpha1Client) CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface { + return newCiliumBGPAdvertisements(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface { + return newCiliumBGPClusterConfigs(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface { + return newCiliumBGPNodeConfigs(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface { + return newCiliumBGPNodeConfigOverrides(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface { + return newCiliumBGPPeerConfigs(c) +} + func (c *CiliumV2alpha1Client) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface { return 
newCiliumBGPPeeringPolicies(c) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..3488a94706 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CiliumBGPAdvertisementsGetter has a method to return a CiliumBGPAdvertisementInterface. +// A group's client should implement this interface. +type CiliumBGPAdvertisementsGetter interface { + CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface +} + +// CiliumBGPAdvertisementInterface has methods to work with CiliumBGPAdvertisement resources. 
+type CiliumBGPAdvertisementInterface interface { + Create(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.CreateOptions) (*v2alpha1.CiliumBGPAdvertisement, error) + Update(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPAdvertisement, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPAdvertisement, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPAdvertisementList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPAdvertisement, err error) + CiliumBGPAdvertisementExpansion +} + +// ciliumBGPAdvertisements implements CiliumBGPAdvertisementInterface +type ciliumBGPAdvertisements struct { + client rest.Interface +} + +// newCiliumBGPAdvertisements returns a CiliumBGPAdvertisements +func newCiliumBGPAdvertisements(c *CiliumV2alpha1Client) *ciliumBGPAdvertisements { + return &ciliumBGPAdvertisements{ + client: c.RESTClient(), + } +} + +// Get takes name of the ciliumBGPAdvertisement, and returns the corresponding ciliumBGPAdvertisement object, and an error if there is any. +func (c *ciliumBGPAdvertisements) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + result = &v2alpha1.CiliumBGPAdvertisement{} + err = c.client.Get(). + Resource("ciliumbgpadvertisements"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CiliumBGPAdvertisements that match those selectors. 
+func (c *ciliumBGPAdvertisements) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPAdvertisementList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CiliumBGPAdvertisementList{} + err = c.client.Get(). + Resource("ciliumbgpadvertisements"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPAdvertisements. +func (c *ciliumBGPAdvertisements) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ciliumbgpadvertisements"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ciliumBGPAdvertisement and creates it. Returns the server's representation of the ciliumBGPAdvertisement, and an error, if there is any. +func (c *ciliumBGPAdvertisements) Create(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + result = &v2alpha1.CiliumBGPAdvertisement{} + err = c.client.Post(). + Resource("ciliumbgpadvertisements"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPAdvertisement). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ciliumBGPAdvertisement and updates it. Returns the server's representation of the ciliumBGPAdvertisement, and an error, if there is any. 
+func (c *ciliumBGPAdvertisements) Update(ctx context.Context, ciliumBGPAdvertisement *v2alpha1.CiliumBGPAdvertisement, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + result = &v2alpha1.CiliumBGPAdvertisement{} + err = c.client.Put(). + Resource("ciliumbgpadvertisements"). + Name(ciliumBGPAdvertisement.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPAdvertisement). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ciliumBGPAdvertisement and deletes it. Returns an error if one occurs. +func (c *ciliumBGPAdvertisements) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ciliumbgpadvertisements"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ciliumBGPAdvertisements) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ciliumbgpadvertisements"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ciliumBGPAdvertisement. +func (c *ciliumBGPAdvertisements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPAdvertisement, err error) { + result = &v2alpha1.CiliumBGPAdvertisement{} + err = c.client.Patch(pt). + Resource("ciliumbgpadvertisements"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..b40c50f257 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CiliumBGPClusterConfigsGetter has a method to return a CiliumBGPClusterConfigInterface. +// A group's client should implement this interface. +type CiliumBGPClusterConfigsGetter interface { + CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface +} + +// CiliumBGPClusterConfigInterface has methods to work with CiliumBGPClusterConfig resources. 
+type CiliumBGPClusterConfigInterface interface { + Create(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (*v2alpha1.CiliumBGPClusterConfig, error) + Update(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPClusterConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPClusterConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPClusterConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPClusterConfig, err error) + CiliumBGPClusterConfigExpansion +} + +// ciliumBGPClusterConfigs implements CiliumBGPClusterConfigInterface +type ciliumBGPClusterConfigs struct { + client rest.Interface +} + +// newCiliumBGPClusterConfigs returns a CiliumBGPClusterConfigs +func newCiliumBGPClusterConfigs(c *CiliumV2alpha1Client) *ciliumBGPClusterConfigs { + return &ciliumBGPClusterConfigs{ + client: c.RESTClient(), + } +} + +// Get takes name of the ciliumBGPClusterConfig, and returns the corresponding ciliumBGPClusterConfig object, and an error if there is any. +func (c *ciliumBGPClusterConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + result = &v2alpha1.CiliumBGPClusterConfig{} + err = c.client.Get(). + Resource("ciliumbgpclusterconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CiliumBGPClusterConfigs that match those selectors. 
+func (c *ciliumBGPClusterConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPClusterConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CiliumBGPClusterConfigList{} + err = c.client.Get(). + Resource("ciliumbgpclusterconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPClusterConfigs. +func (c *ciliumBGPClusterConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ciliumbgpclusterconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ciliumBGPClusterConfig and creates it. Returns the server's representation of the ciliumBGPClusterConfig, and an error, if there is any. +func (c *ciliumBGPClusterConfigs) Create(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + result = &v2alpha1.CiliumBGPClusterConfig{} + err = c.client.Post(). + Resource("ciliumbgpclusterconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPClusterConfig). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ciliumBGPClusterConfig and updates it. Returns the server's representation of the ciliumBGPClusterConfig, and an error, if there is any. 
+func (c *ciliumBGPClusterConfigs) Update(ctx context.Context, ciliumBGPClusterConfig *v2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + result = &v2alpha1.CiliumBGPClusterConfig{} + err = c.client.Put(). + Resource("ciliumbgpclusterconfigs"). + Name(ciliumBGPClusterConfig.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPClusterConfig). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ciliumBGPClusterConfig and deletes it. Returns an error if one occurs. +func (c *ciliumBGPClusterConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ciliumbgpclusterconfigs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ciliumBGPClusterConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ciliumbgpclusterconfigs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ciliumBGPClusterConfig. +func (c *ciliumBGPClusterConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPClusterConfig, err error) { + result = &v2alpha1.CiliumBGPClusterConfig{} + err = c.client.Patch(pt). + Resource("ciliumbgpclusterconfigs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..005a60d648 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CiliumBGPNodeConfigsGetter has a method to return a CiliumBGPNodeConfigInterface. +// A group's client should implement this interface. +type CiliumBGPNodeConfigsGetter interface { + CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface +} + +// CiliumBGPNodeConfigInterface has methods to work with CiliumBGPNodeConfig resources. 
+type CiliumBGPNodeConfigInterface interface { + Create(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.CreateOptions) (*v2alpha1.CiliumBGPNodeConfig, error) + Update(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPNodeConfig, error) + UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPNodeConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPNodeConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPNodeConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfig, err error) + CiliumBGPNodeConfigExpansion +} + +// ciliumBGPNodeConfigs implements CiliumBGPNodeConfigInterface +type ciliumBGPNodeConfigs struct { + client rest.Interface +} + +// newCiliumBGPNodeConfigs returns a CiliumBGPNodeConfigs +func newCiliumBGPNodeConfigs(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigs { + return &ciliumBGPNodeConfigs{ + client: c.RESTClient(), + } +} + +// Get takes name of the ciliumBGPNodeConfig, and returns the corresponding ciliumBGPNodeConfig object, and an error if there is any. +func (c *ciliumBGPNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + result = &v2alpha1.CiliumBGPNodeConfig{} + err = c.client.Get(). + Resource("ciliumbgpnodeconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of CiliumBGPNodeConfigs that match those selectors. +func (c *ciliumBGPNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPNodeConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CiliumBGPNodeConfigList{} + err = c.client.Get(). + Resource("ciliumbgpnodeconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPNodeConfigs. +func (c *ciliumBGPNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ciliumbgpnodeconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ciliumBGPNodeConfig and creates it. Returns the server's representation of the ciliumBGPNodeConfig, and an error, if there is any. +func (c *ciliumBGPNodeConfigs) Create(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + result = &v2alpha1.CiliumBGPNodeConfig{} + err = c.client.Post(). + Resource("ciliumbgpnodeconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPNodeConfig). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ciliumBGPNodeConfig and updates it. Returns the server's representation of the ciliumBGPNodeConfig, and an error, if there is any. 
+func (c *ciliumBGPNodeConfigs) Update(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + result = &v2alpha1.CiliumBGPNodeConfig{} + err = c.client.Put(). + Resource("ciliumbgpnodeconfigs"). + Name(ciliumBGPNodeConfig.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPNodeConfig). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *ciliumBGPNodeConfigs) UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *v2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + result = &v2alpha1.CiliumBGPNodeConfig{} + err = c.client.Put(). + Resource("ciliumbgpnodeconfigs"). + Name(ciliumBGPNodeConfig.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPNodeConfig). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ciliumBGPNodeConfig and deletes it. Returns an error if one occurs. +func (c *ciliumBGPNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ciliumbgpnodeconfigs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ciliumBGPNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ciliumbgpnodeconfigs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ciliumBGPNodeConfig. 
+func (c *ciliumBGPNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfig, err error) { + result = &v2alpha1.CiliumBGPNodeConfig{} + err = c.client.Patch(pt). + Resource("ciliumbgpnodeconfigs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..6c2276fdeb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CiliumBGPNodeConfigOverridesGetter has a method to return a CiliumBGPNodeConfigOverrideInterface. +// A group's client should implement this interface. +type CiliumBGPNodeConfigOverridesGetter interface { + CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface +} + +// CiliumBGPNodeConfigOverrideInterface has methods to work with CiliumBGPNodeConfigOverride resources. 
+type CiliumBGPNodeConfigOverrideInterface interface { + Create(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (*v2alpha1.CiliumBGPNodeConfigOverride, error) + Update(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPNodeConfigOverride, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPNodeConfigOverride, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPNodeConfigOverrideList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) + CiliumBGPNodeConfigOverrideExpansion +} + +// ciliumBGPNodeConfigOverrides implements CiliumBGPNodeConfigOverrideInterface +type ciliumBGPNodeConfigOverrides struct { + client rest.Interface +} + +// newCiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrides +func newCiliumBGPNodeConfigOverrides(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigOverrides { + return &ciliumBGPNodeConfigOverrides{ + client: c.RESTClient(), + } +} + +// Get takes name of the ciliumBGPNodeConfigOverride, and returns the corresponding ciliumBGPNodeConfigOverride object, and an error if there is any. +func (c *ciliumBGPNodeConfigOverrides) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + result = &v2alpha1.CiliumBGPNodeConfigOverride{} + err = c.client.Get(). + Resource("ciliumbgpnodeconfigoverrides"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of CiliumBGPNodeConfigOverrides that match those selectors. +func (c *ciliumBGPNodeConfigOverrides) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPNodeConfigOverrideList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CiliumBGPNodeConfigOverrideList{} + err = c.client.Get(). + Resource("ciliumbgpnodeconfigoverrides"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPNodeConfigOverrides. +func (c *ciliumBGPNodeConfigOverrides) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ciliumbgpnodeconfigoverrides"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ciliumBGPNodeConfigOverride and creates it. Returns the server's representation of the ciliumBGPNodeConfigOverride, and an error, if there is any. +func (c *ciliumBGPNodeConfigOverrides) Create(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + result = &v2alpha1.CiliumBGPNodeConfigOverride{} + err = c.client.Post(). + Resource("ciliumbgpnodeconfigoverrides"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPNodeConfigOverride). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ciliumBGPNodeConfigOverride and updates it. 
Returns the server's representation of the ciliumBGPNodeConfigOverride, and an error, if there is any. +func (c *ciliumBGPNodeConfigOverrides) Update(ctx context.Context, ciliumBGPNodeConfigOverride *v2alpha1.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + result = &v2alpha1.CiliumBGPNodeConfigOverride{} + err = c.client.Put(). + Resource("ciliumbgpnodeconfigoverrides"). + Name(ciliumBGPNodeConfigOverride.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPNodeConfigOverride). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ciliumBGPNodeConfigOverride and deletes it. Returns an error if one occurs. +func (c *ciliumBGPNodeConfigOverrides) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ciliumbgpnodeconfigoverrides"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ciliumBGPNodeConfigOverrides) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ciliumbgpnodeconfigoverrides"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ciliumBGPNodeConfigOverride. +func (c *ciliumBGPNodeConfigOverrides) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPNodeConfigOverride, err error) { + result = &v2alpha1.CiliumBGPNodeConfigOverride{} + err = c.client.Patch(pt). + Resource("ciliumbgpnodeconfigoverrides"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). 
+ Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..da99152671 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CiliumBGPPeerConfigsGetter has a method to return a CiliumBGPPeerConfigInterface. +// A group's client should implement this interface. +type CiliumBGPPeerConfigsGetter interface { + CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface +} + +// CiliumBGPPeerConfigInterface has methods to work with CiliumBGPPeerConfig resources. 
+type CiliumBGPPeerConfigInterface interface { + Create(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (*v2alpha1.CiliumBGPPeerConfig, error) + Update(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPPeerConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPPeerConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPPeerConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeerConfig, err error) + CiliumBGPPeerConfigExpansion +} + +// ciliumBGPPeerConfigs implements CiliumBGPPeerConfigInterface +type ciliumBGPPeerConfigs struct { + client rest.Interface +} + +// newCiliumBGPPeerConfigs returns a CiliumBGPPeerConfigs +func newCiliumBGPPeerConfigs(c *CiliumV2alpha1Client) *ciliumBGPPeerConfigs { + return &ciliumBGPPeerConfigs{ + client: c.RESTClient(), + } +} + +// Get takes name of the ciliumBGPPeerConfig, and returns the corresponding ciliumBGPPeerConfig object, and an error if there is any. +func (c *ciliumBGPPeerConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + result = &v2alpha1.CiliumBGPPeerConfig{} + err = c.client.Get(). + Resource("ciliumbgppeerconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CiliumBGPPeerConfigs that match those selectors. 
+func (c *ciliumBGPPeerConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeerConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CiliumBGPPeerConfigList{} + err = c.client.Get(). + Resource("ciliumbgppeerconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ciliumBGPPeerConfigs. +func (c *ciliumBGPPeerConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ciliumbgppeerconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ciliumBGPPeerConfig and creates it. Returns the server's representation of the ciliumBGPPeerConfig, and an error, if there is any. +func (c *ciliumBGPPeerConfigs) Create(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + result = &v2alpha1.CiliumBGPPeerConfig{} + err = c.client.Post(). + Resource("ciliumbgppeerconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPPeerConfig). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ciliumBGPPeerConfig and updates it. Returns the server's representation of the ciliumBGPPeerConfig, and an error, if there is any. +func (c *ciliumBGPPeerConfigs) Update(ctx context.Context, ciliumBGPPeerConfig *v2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + result = &v2alpha1.CiliumBGPPeerConfig{} + err = c.client.Put(). 
+ Resource("ciliumbgppeerconfigs"). + Name(ciliumBGPPeerConfig.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ciliumBGPPeerConfig). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ciliumBGPPeerConfig and deletes it. Returns an error if one occurs. +func (c *ciliumBGPPeerConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ciliumbgppeerconfigs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ciliumBGPPeerConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ciliumbgppeerconfigs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ciliumBGPPeerConfig. +func (c *ciliumBGPPeerConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeerConfig, err error) { + result = &v2alpha1.CiliumBGPPeerConfig{} + err = c.client.Patch(pt). + Resource("ciliumbgppeerconfigs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go index 7dc4b1076c..4b25b32bb8 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go @@ -5,6 +5,16 @@ package v2alpha1 +type CiliumBGPAdvertisementExpansion interface{} + +type CiliumBGPClusterConfigExpansion interface{} + +type CiliumBGPNodeConfigExpansion interface{} + +type CiliumBGPNodeConfigOverrideExpansion interface{} + +type CiliumBGPPeerConfigExpansion interface{} + type CiliumBGPPeeringPolicyExpansion interface{} type CiliumCIDRGroupExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..80132f6348 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v2alpha1 + +import ( + "context" + time "time" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPAdvertisementInformer provides access to a shared informer and lister for +// CiliumBGPAdvertisements. +type CiliumBGPAdvertisementInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CiliumBGPAdvertisementLister +} + +type ciliumBGPAdvertisementInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPAdvertisementInformer constructs a new informer for CiliumBGPAdvertisement type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPAdvertisementInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPAdvertisementInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPAdvertisementInformer constructs a new informer for CiliumBGPAdvertisement type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPAdvertisementInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPAdvertisements().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPAdvertisements().Watch(context.TODO(), options) + }, + }, + &ciliumiov2alpha1.CiliumBGPAdvertisement{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPAdvertisementInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPAdvertisementInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPAdvertisementInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPAdvertisement{}, f.defaultInformer) +} + +func (f *ciliumBGPAdvertisementInformer) Lister() v2alpha1.CiliumBGPAdvertisementLister { + return v2alpha1.NewCiliumBGPAdvertisementLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..9afb488fe9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of 
Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + time "time" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPClusterConfigInformer provides access to a shared informer and lister for +// CiliumBGPClusterConfigs. +type CiliumBGPClusterConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CiliumBGPClusterConfigLister +} + +type ciliumBGPClusterConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPClusterConfigInformer constructs a new informer for CiliumBGPClusterConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPClusterConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPClusterConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPClusterConfigInformer constructs a new informer for CiliumBGPClusterConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPClusterConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPClusterConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPClusterConfigs().Watch(context.TODO(), options) + }, + }, + &ciliumiov2alpha1.CiliumBGPClusterConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPClusterConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPClusterConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPClusterConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPClusterConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPClusterConfigInformer) Lister() v2alpha1.CiliumBGPClusterConfigLister { + return v2alpha1.NewCiliumBGPClusterConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..9fb6f9806a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + 
+// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + time "time" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigInformer provides access to a shared informer and lister for +// CiliumBGPNodeConfigs. +type CiliumBGPNodeConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CiliumBGPNodeConfigLister +} + +type ciliumBGPNodeConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPNodeConfigInformer constructs a new informer for CiliumBGPNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPNodeConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPNodeConfigInformer constructs a new informer for CiliumBGPNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPNodeConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigs().Watch(context.TODO(), options) + }, + }, + &ciliumiov2alpha1.CiliumBGPNodeConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPNodeConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPNodeConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPNodeConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPNodeConfigInformer) Lister() v2alpha1.CiliumBGPNodeConfigLister { + return v2alpha1.NewCiliumBGPNodeConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..6f010f350c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code 
generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + time "time" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigOverrideInformer provides access to a shared informer and lister for +// CiliumBGPNodeConfigOverrides. +type CiliumBGPNodeConfigOverrideInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CiliumBGPNodeConfigOverrideLister +} + +type ciliumBGPNodeConfigOverrideInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPNodeConfigOverrideInformer constructs a new informer for CiliumBGPNodeConfigOverride type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPNodeConfigOverrideInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigOverrideInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPNodeConfigOverrideInformer constructs a new informer for CiliumBGPNodeConfigOverride type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPNodeConfigOverrideInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigOverrides().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigOverrides().Watch(context.TODO(), options) + }, + }, + &ciliumiov2alpha1.CiliumBGPNodeConfigOverride{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigOverrideInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPNodeConfigOverride{}, f.defaultInformer) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) Lister() v2alpha1.CiliumBGPNodeConfigOverrideLister { + return v2alpha1.NewCiliumBGPNodeConfigOverrideLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..958fc765cc --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,76 @@ +// 
SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "context" + time "time" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPPeerConfigInformer provides access to a shared informer and lister for +// CiliumBGPPeerConfigs. +type CiliumBGPPeerConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CiliumBGPPeerConfigLister +} + +type ciliumBGPPeerConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPPeerConfigInformer constructs a new informer for CiliumBGPPeerConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPPeerConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPPeerConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPPeerConfigInformer constructs a new informer for CiliumBGPPeerConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPPeerConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPPeerConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPPeerConfigs().Watch(context.TODO(), options) + }, + }, + &ciliumiov2alpha1.CiliumBGPPeerConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPPeerConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPPeerConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPPeerConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPPeerConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPPeerConfigInformer) Lister() v2alpha1.CiliumBGPPeerConfigLister { + return v2alpha1.NewCiliumBGPPeerConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go index 0301a05ac0..5d508bcbc2 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go @@ -11,6 +11,16 @@ import ( // Interface provides access to all the 
informers in this group version. type Interface interface { + // CiliumBGPAdvertisements returns a CiliumBGPAdvertisementInformer. + CiliumBGPAdvertisements() CiliumBGPAdvertisementInformer + // CiliumBGPClusterConfigs returns a CiliumBGPClusterConfigInformer. + CiliumBGPClusterConfigs() CiliumBGPClusterConfigInformer + // CiliumBGPNodeConfigs returns a CiliumBGPNodeConfigInformer. + CiliumBGPNodeConfigs() CiliumBGPNodeConfigInformer + // CiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrideInformer. + CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInformer + // CiliumBGPPeerConfigs returns a CiliumBGPPeerConfigInformer. + CiliumBGPPeerConfigs() CiliumBGPPeerConfigInformer // CiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicyInformer. CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInformer // CiliumCIDRGroups returns a CiliumCIDRGroupInformer. @@ -38,6 +48,31 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CiliumBGPAdvertisements returns a CiliumBGPAdvertisementInformer. +func (v *version) CiliumBGPAdvertisements() CiliumBGPAdvertisementInformer { + return &ciliumBGPAdvertisementInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPClusterConfigs returns a CiliumBGPClusterConfigInformer. +func (v *version) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInformer { + return &ciliumBGPClusterConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPNodeConfigs returns a CiliumBGPNodeConfigInformer. +func (v *version) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInformer { + return &ciliumBGPNodeConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrideInformer. 
+func (v *version) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInformer { + return &ciliumBGPNodeConfigOverrideInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPPeerConfigs returns a CiliumBGPPeerConfigInformer. +func (v *version) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInformer { + return &ciliumBGPPeerConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // CiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicyInformer. func (v *version) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInformer { return &ciliumBGPPeeringPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go index 21a6ed6b58..a3035298bd 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go @@ -29,6 +29,7 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -67,6 +68,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -153,7 +162,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -171,6 +180,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -226,7 +236,7 @@ type SharedInformerFactory interface { // ForResource gives generic access to a shared informer of the matching type. ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // InformerFor returns the SharedIndexInformer for obj using an internal // client. 
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go index 7ed1e8f6ea..840386c971 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go @@ -63,6 +63,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2().CiliumNodes().Informer()}, nil // Group=cilium.io, Version=v2alpha1 + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpadvertisements"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPAdvertisements().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpclusterconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPClusterConfigs().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPNodeConfigs().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigoverrides"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPNodeConfigOverrides().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeerconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPPeerConfigs().Informer()}, nil case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeeringpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Cilium().V2alpha1().CiliumBGPPeeringPolicies().Informer()}, nil case v2alpha1.SchemeGroupVersion.WithResource("ciliumcidrgroups"): diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..f4a04f3ae8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CiliumBGPAdvertisementLister helps list CiliumBGPAdvertisements. +// All objects returned here must be treated as read-only. +type CiliumBGPAdvertisementLister interface { + // List lists all CiliumBGPAdvertisements in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPAdvertisement, err error) + // Get retrieves the CiliumBGPAdvertisement from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v2alpha1.CiliumBGPAdvertisement, error) + CiliumBGPAdvertisementListerExpansion +} + +// ciliumBGPAdvertisementLister implements the CiliumBGPAdvertisementLister interface. +type ciliumBGPAdvertisementLister struct { + indexer cache.Indexer +} + +// NewCiliumBGPAdvertisementLister returns a new CiliumBGPAdvertisementLister. +func NewCiliumBGPAdvertisementLister(indexer cache.Indexer) CiliumBGPAdvertisementLister { + return &ciliumBGPAdvertisementLister{indexer: indexer} +} + +// List lists all CiliumBGPAdvertisements in the indexer. 
+func (s *ciliumBGPAdvertisementLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPAdvertisement, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CiliumBGPAdvertisement)) + }) + return ret, err +} + +// Get retrieves the CiliumBGPAdvertisement from the index for a given name. +func (s *ciliumBGPAdvertisementLister) Get(name string) (*v2alpha1.CiliumBGPAdvertisement, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgpadvertisement"), name) + } + return obj.(*v2alpha1.CiliumBGPAdvertisement), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..ad5131c256 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CiliumBGPClusterConfigLister helps list CiliumBGPClusterConfigs. +// All objects returned here must be treated as read-only. +type CiliumBGPClusterConfigLister interface { + // List lists all CiliumBGPClusterConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPClusterConfig, err error) + // Get retrieves the CiliumBGPClusterConfig from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v2alpha1.CiliumBGPClusterConfig, error) + CiliumBGPClusterConfigListerExpansion +} + +// ciliumBGPClusterConfigLister implements the CiliumBGPClusterConfigLister interface. +type ciliumBGPClusterConfigLister struct { + indexer cache.Indexer +} + +// NewCiliumBGPClusterConfigLister returns a new CiliumBGPClusterConfigLister. +func NewCiliumBGPClusterConfigLister(indexer cache.Indexer) CiliumBGPClusterConfigLister { + return &ciliumBGPClusterConfigLister{indexer: indexer} +} + +// List lists all CiliumBGPClusterConfigs in the indexer. +func (s *ciliumBGPClusterConfigLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPClusterConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CiliumBGPClusterConfig)) + }) + return ret, err +} + +// Get retrieves the CiliumBGPClusterConfig from the index for a given name. +func (s *ciliumBGPClusterConfigLister) Get(name string) (*v2alpha1.CiliumBGPClusterConfig, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgpclusterconfig"), name) + } + return obj.(*v2alpha1.CiliumBGPClusterConfig), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..ff6398f6d5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v2alpha1 + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigLister helps list CiliumBGPNodeConfigs. +// All objects returned here must be treated as read-only. +type CiliumBGPNodeConfigLister interface { + // List lists all CiliumBGPNodeConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPNodeConfig, err error) + // Get retrieves the CiliumBGPNodeConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v2alpha1.CiliumBGPNodeConfig, error) + CiliumBGPNodeConfigListerExpansion +} + +// ciliumBGPNodeConfigLister implements the CiliumBGPNodeConfigLister interface. +type ciliumBGPNodeConfigLister struct { + indexer cache.Indexer +} + +// NewCiliumBGPNodeConfigLister returns a new CiliumBGPNodeConfigLister. +func NewCiliumBGPNodeConfigLister(indexer cache.Indexer) CiliumBGPNodeConfigLister { + return &ciliumBGPNodeConfigLister{indexer: indexer} +} + +// List lists all CiliumBGPNodeConfigs in the indexer. +func (s *ciliumBGPNodeConfigLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPNodeConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CiliumBGPNodeConfig)) + }) + return ret, err +} + +// Get retrieves the CiliumBGPNodeConfig from the index for a given name. 
+func (s *ciliumBGPNodeConfigLister) Get(name string) (*v2alpha1.CiliumBGPNodeConfig, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgpnodeconfig"), name) + } + return obj.(*v2alpha1.CiliumBGPNodeConfig), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..09ea31a289 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigOverrideLister helps list CiliumBGPNodeConfigOverrides. +// All objects returned here must be treated as read-only. +type CiliumBGPNodeConfigOverrideLister interface { + // List lists all CiliumBGPNodeConfigOverrides in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPNodeConfigOverride, err error) + // Get retrieves the CiliumBGPNodeConfigOverride from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v2alpha1.CiliumBGPNodeConfigOverride, error) + CiliumBGPNodeConfigOverrideListerExpansion +} + +// ciliumBGPNodeConfigOverrideLister implements the CiliumBGPNodeConfigOverrideLister interface. 
+type ciliumBGPNodeConfigOverrideLister struct { + indexer cache.Indexer +} + +// NewCiliumBGPNodeConfigOverrideLister returns a new CiliumBGPNodeConfigOverrideLister. +func NewCiliumBGPNodeConfigOverrideLister(indexer cache.Indexer) CiliumBGPNodeConfigOverrideLister { + return &ciliumBGPNodeConfigOverrideLister{indexer: indexer} +} + +// List lists all CiliumBGPNodeConfigOverrides in the indexer. +func (s *ciliumBGPNodeConfigOverrideLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPNodeConfigOverride, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CiliumBGPNodeConfigOverride)) + }) + return ret, err +} + +// Get retrieves the CiliumBGPNodeConfigOverride from the index for a given name. +func (s *ciliumBGPNodeConfigOverrideLister) Get(name string) (*v2alpha1.CiliumBGPNodeConfigOverride, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgpnodeconfigoverride"), name) + } + return obj.(*v2alpha1.CiliumBGPNodeConfigOverride), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..e8965566f0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CiliumBGPPeerConfigLister helps list CiliumBGPPeerConfigs. +// All objects returned here must be treated as read-only. 
+type CiliumBGPPeerConfigLister interface { + // List lists all CiliumBGPPeerConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPPeerConfig, err error) + // Get retrieves the CiliumBGPPeerConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v2alpha1.CiliumBGPPeerConfig, error) + CiliumBGPPeerConfigListerExpansion +} + +// ciliumBGPPeerConfigLister implements the CiliumBGPPeerConfigLister interface. +type ciliumBGPPeerConfigLister struct { + indexer cache.Indexer +} + +// NewCiliumBGPPeerConfigLister returns a new CiliumBGPPeerConfigLister. +func NewCiliumBGPPeerConfigLister(indexer cache.Indexer) CiliumBGPPeerConfigLister { + return &ciliumBGPPeerConfigLister{indexer: indexer} +} + +// List lists all CiliumBGPPeerConfigs in the indexer. +func (s *ciliumBGPPeerConfigLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPPeerConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CiliumBGPPeerConfig)) + }) + return ret, err +} + +// Get retrieves the CiliumBGPPeerConfig from the index for a given name. 
+func (s *ciliumBGPPeerConfigLister) Get(name string) (*v2alpha1.CiliumBGPPeerConfig, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgppeerconfig"), name) + } + return obj.(*v2alpha1.CiliumBGPPeerConfig), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go index 8d348d108d..0a2e72c1f3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go @@ -5,6 +5,26 @@ package v2alpha1 +// CiliumBGPAdvertisementListerExpansion allows custom methods to be added to +// CiliumBGPAdvertisementLister. +type CiliumBGPAdvertisementListerExpansion interface{} + +// CiliumBGPClusterConfigListerExpansion allows custom methods to be added to +// CiliumBGPClusterConfigLister. +type CiliumBGPClusterConfigListerExpansion interface{} + +// CiliumBGPNodeConfigListerExpansion allows custom methods to be added to +// CiliumBGPNodeConfigLister. +type CiliumBGPNodeConfigListerExpansion interface{} + +// CiliumBGPNodeConfigOverrideListerExpansion allows custom methods to be added to +// CiliumBGPNodeConfigOverrideLister. +type CiliumBGPNodeConfigOverrideListerExpansion interface{} + +// CiliumBGPPeerConfigListerExpansion allows custom methods to be added to +// CiliumBGPPeerConfigLister. +type CiliumBGPPeerConfigListerExpansion interface{} + // CiliumBGPPeeringPolicyListerExpansion allows custom methods to be added to // CiliumBGPPeeringPolicyLister. 
type CiliumBGPPeeringPolicyListerExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go index 63de1bc6a4..f76b229b0b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go @@ -1175,172 +1175,174 @@ func init() { } var fileDescriptor_871504499faea14d = []byte{ - // 2636 bytes of a gzipped FileDescriptorProto + // 2659 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcf, 0x6f, 0x23, 0x49, 0xf5, 0x9f, 0xb6, 0xe3, 0xc4, 0x7e, 0x4e, 0x9c, 0x4d, 0xcd, 0x66, 0xd5, 0x93, 0xef, 0x6c, 0x92, - 0x6f, 0x0f, 0x42, 0xc3, 0x2f, 0x5b, 0x33, 0x10, 0x98, 0x9d, 0xd9, 0x19, 0x26, 0x76, 0x32, 0x3b, - 0xde, 0x4d, 0x66, 0x9b, 0x72, 0xb4, 0x20, 0x38, 0xb0, 0x9d, 0xee, 0x8a, 0xdd, 0x1b, 0xbb, 0xdb, - 0x74, 0x95, 0x33, 0x6b, 0x09, 0xc1, 0x22, 0x04, 0x5a, 0x60, 0x24, 0xf8, 0x07, 0xb8, 0x70, 0xe3, - 0x0c, 0x17, 0xc4, 0x19, 0x31, 0xdc, 0x96, 0xdb, 0x08, 0xa1, 0xb0, 0x13, 0x24, 0x6e, 0x1c, 0x10, - 0x27, 0x82, 0x84, 0x50, 0x55, 0x57, 0x57, 0x77, 0x3b, 0xf6, 0x4c, 0xc6, 0x8e, 0x14, 0x71, 0x72, - 0xf7, 0x7b, 0xaf, 0xde, 0xa7, 0x5e, 0xd5, 0xab, 0xf7, 0xa3, 0xdc, 0xb0, 0xd9, 0x74, 0x59, 0xab, - 0xb7, 0x5b, 0xb6, 0xfd, 0x4e, 0xc5, 0x76, 0xdb, 0x6e, 0x4f, 0xfd, 0x74, 0xf7, 0x9b, 0x95, 0xfd, - 0x1b, 0xb4, 0x42, 0xdb, 0x6e, 0x47, 0x3c, 0x58, 0x5d, 0xb7, 0x62, 0xfb, 0x01, 0xa9, 0x1c, 0x5c, - 0xab, 0x34, 0x89, 0x47, 0x02, 0x8b, 0x11, 0xa7, 0xdc, 0x0d, 0x7c, 0xe6, 0xa3, 0xb5, 0x58, 0x4d, - 0x39, 0x1c, 0x1f, 0xfd, 0x74, 0xf7, 0x9b, 0xe5, 0xfd, 0x1b, 0xb4, 0xcc, 0xd5, 0x88, 0x07, 0xab, - 0xeb, 0x96, 0xb9, 0x9a, 0xf2, 0xc1, 0xb5, 0xa5, 0x7b, 0x2f, 0x84, 0x4e, 0x2b, 0x1d, 0xc2, 0xac, - 0x21, 0xf0, 0x4b, 0x9f, 0x4b, 0xe8, 0x69, 0xfa, 0x4d, 0xbf, 0x22, 0xc8, 0xbb, 0xbd, 0x3d, 0xf1, - 0x26, 0x5e, 0xc4, 
0x93, 0x14, 0xe7, 0x0a, 0xcb, 0xae, 0xcf, 0x75, 0x76, 0x2c, 0xbb, 0xe5, 0x7a, - 0x24, 0xe8, 0x0b, 0xc4, 0xa0, 0xe7, 0x31, 0xb7, 0x43, 0x4e, 0xe8, 0xff, 0xe2, 0xf3, 0x06, 0x50, - 0xbb, 0x45, 0x3a, 0xd6, 0xe0, 0x38, 0x63, 0x0b, 0x4a, 0xb5, 0xb6, 0x4b, 0x3c, 0x56, 0x37, 0x6b, - 0xbe, 0xb7, 0xe7, 0x36, 0xd1, 0x4d, 0x28, 0xf1, 0x01, 0x7e, 0x8f, 0x35, 0x88, 0xed, 0x7b, 0x0e, - 0xd5, 0xb5, 0x55, 0xed, 0x6a, 0xae, 0x8a, 0x8e, 0x0e, 0x57, 0x4a, 0x3b, 0x29, 0x0e, 0x1e, 0x90, - 0x34, 0x7e, 0x9b, 0x81, 0x42, 0xcd, 0xf7, 0x98, 0xc5, 0xf1, 0xd1, 0x2a, 0x4c, 0x79, 0x56, 0x87, - 0x88, 0xf1, 0x85, 0xea, 0xec, 0xe3, 0xc3, 0x95, 0x0b, 0x47, 0x87, 0x2b, 0x53, 0x0f, 0xac, 0x0e, - 0xc1, 0x82, 0x83, 0xae, 0x40, 0xce, 0xed, 0x58, 0x4d, 0xa2, 0x67, 0x84, 0xc8, 0x9c, 0x14, 0xc9, - 0xd5, 0x39, 0x11, 0x87, 0x3c, 0xe4, 0x42, 0xae, 0xeb, 0x07, 0x8c, 0xea, 0xd3, 0xab, 0xd9, 0xab, - 0xc5, 0xeb, 0x1b, 0xe5, 0xb1, 0x76, 0xb2, 0xac, 0xe6, 0x65, 0xfa, 0x01, 0x8b, 0xa1, 0xf8, 0x1b, - 0xc5, 0x21, 0x02, 0xfa, 0x36, 0xcc, 0x1e, 0xf8, 0xed, 0x5e, 0x87, 0x6c, 0xfb, 0x3d, 0x8f, 0x51, - 0xbd, 0x20, 0x10, 0xab, 0x63, 0x22, 0xbe, 0x13, 0xab, 0xaa, 0xbe, 0x2c, 0xf1, 0x66, 0x13, 0x44, - 0x8a, 0x53, 0x68, 0xc6, 0x7f, 0x34, 0x98, 0x4b, 0xcd, 0xf2, 0x14, 0x2b, 0xf8, 0x59, 0xc8, 0xb7, - 0x7c, 0xca, 0xb8, 0xb4, 0x58, 0xc4, 0x5c, 0xf5, 0x25, 0x29, 0x95, 0xbf, 0x2f, 0xe9, 0x58, 0x49, - 0xa0, 0x5b, 0x30, 0x67, 0x27, 0x01, 0xf4, 0xac, 0x18, 0xb2, 0x28, 0x87, 0xa4, 0xd1, 0x71, 0x5a, - 0x16, 0xdd, 0x80, 0xbc, 0xf0, 0x19, 0xdb, 0x6f, 0xeb, 0x53, 0x62, 0x42, 0x97, 0x23, 0x28, 0x53, - 0xd2, 0x8f, 0x13, 0xcf, 0x58, 0x49, 0xa3, 0x4f, 0xc2, 0x34, 0x9f, 0x42, 0xdd, 0xd4, 0x73, 0x62, - 0x5c, 0x49, 0x8e, 0x9b, 0xbe, 0x2f, 0xa8, 0x58, 0x72, 0x8d, 0x1f, 0x68, 0x50, 0x52, 0x53, 0x68, - 0x30, 0x8b, 0x11, 0x44, 0x61, 0x26, 0xe8, 0x79, 0x9e, 0xeb, 0x35, 0x85, 0x79, 0xc5, 0xeb, 0x5b, - 0x93, 0x6e, 0xbf, 0xd0, 0x8b, 0x43, 0x9d, 0xd5, 0xe2, 0xd1, 0xe1, 0xca, 0x8c, 0x7c, 0xc1, 0x11, - 0x92, 0xf1, 0x43, 0x0d, 0x16, 0x87, 0xca, 0xa3, 0x0e, 
0x14, 0x28, 0xb3, 0x02, 0x46, 0x9c, 0x75, - 0x26, 0x76, 0xa5, 0x78, 0xfd, 0xf5, 0x17, 0x9b, 0x10, 0x2d, 0xf3, 0x10, 0xc1, 0x67, 0xc4, 0xcf, - 0x52, 0x75, 0x41, 0x2e, 0x45, 0xa1, 0x11, 0xa9, 0xc5, 0x31, 0x82, 0xf1, 0x6b, 0x0d, 0xe6, 0x53, - 0x13, 0xe9, 0x51, 0xf4, 0x1e, 0xe4, 0x28, 0x9f, 0x92, 0x5c, 0x8f, 0xcd, 0x33, 0x59, 0x8f, 0xf8, - 0x3c, 0x84, 0xe6, 0x86, 0x10, 0x68, 0x0d, 0x8a, 0xca, 0x07, 0xea, 0x1b, 0x7a, 0x5e, 0xec, 0xde, - 0x45, 0x29, 0x5a, 0xac, 0xc5, 0x2c, 0x9c, 0x94, 0x33, 0xbe, 0x0a, 0xf3, 0x9b, 0x9e, 0xd3, 0xf5, - 0x5d, 0x8f, 0xad, 0x3b, 0x4e, 0x40, 0x28, 0x45, 0x4b, 0x90, 0x71, 0xbb, 0xd2, 0x8f, 0x41, 0x2a, - 0xc8, 0xd4, 0x4d, 0x9c, 0x71, 0xbb, 0xe8, 0x2a, 0xe4, 0x3d, 0xdf, 0x21, 0xdc, 0xab, 0xa5, 0x63, - 0xcd, 0x72, 0xa7, 0x7a, 0x20, 0x69, 0x58, 0x71, 0x8d, 0x47, 0x1a, 0xcc, 0x46, 0x9a, 0x4f, 0x79, - 0x40, 0x56, 0x61, 0xaa, 0x1b, 0x1f, 0x0e, 0x25, 0x21, 0x1c, 0x5c, 0x70, 0x52, 0x7e, 0x9d, 0x7d, - 0x11, 0xbf, 0x36, 0xfe, 0xa9, 0x41, 0x29, 0x9a, 0x4e, 0xa3, 0xb7, 0x4b, 0x09, 0x43, 0x0f, 0xa1, - 0x60, 0x85, 0x26, 0x13, 0x1e, 0x38, 0x79, 0xf8, 0xb8, 0x37, 0xe6, 0x0e, 0x0d, 0x2c, 0x61, 0xec, - 0x2a, 0xeb, 0x11, 0x00, 0x8e, 0xb1, 0x50, 0x2b, 0x8a, 0x92, 0x59, 0x01, 0x5a, 0x9b, 0x10, 0x74, - 0x74, 0x90, 0x34, 0xfe, 0xa1, 0x41, 0x21, 0x12, 0xa3, 0x28, 0x80, 0x3c, 0x77, 0x68, 0xc7, 0x62, - 0x96, 0x3c, 0x10, 0xd5, 0x71, 0x0f, 0xc4, 0xdb, 0xbb, 0xef, 0x11, 0x9b, 0x6d, 0x13, 0x66, 0x55, - 0x91, 0x44, 0x86, 0x98, 0x86, 0x15, 0x0e, 0xea, 0xc2, 0x0c, 0x15, 0xcb, 0x4d, 0xf5, 0x8c, 0xb0, - 0x76, 0x73, 0x42, 0x6b, 0xc3, 0xcd, 0xab, 0xce, 0x4b, 0xd4, 0x99, 0xf0, 0x9d, 0xe2, 0x08, 0xc6, - 0xf8, 0x9b, 0x06, 0x73, 0xca, 0xe6, 0x2d, 0x97, 0x32, 0xe4, 0x9d, 0xb0, 0xfb, 0xee, 0xb8, 0x76, - 0x73, 0x7d, 0xc2, 0x6a, 0x15, 0xba, 0x23, 0x4a, 0xc2, 0x66, 0x02, 0x39, 0x97, 0x91, 0x4e, 0x64, - 0xf1, 0xdd, 0x09, 0x2d, 0xa6, 0x89, 0x64, 0xcb, 0xd5, 0xe2, 0x50, 0xbb, 0xf1, 0x7b, 0x0d, 0x2e, - 0x6e, 0xf9, 0x96, 0x53, 0xb5, 0xda, 0x96, 0x67, 0x93, 0xa0, 0xee, 0x35, 0x9f, 0x7b, 0x7e, 
0x65, - 0x0e, 0x12, 0x07, 0x31, 0x4c, 0xe4, 0xa9, 0x1c, 0xe4, 0x89, 0x33, 0x1c, 0x49, 0xa0, 0xbd, 0xc8, - 0x51, 0xa7, 0x84, 0x21, 0xeb, 0x63, 0x1a, 0xc2, 0x5d, 0x32, 0x8c, 0x88, 0x23, 0xdc, 0xf4, 0x27, - 0x1a, 0xa0, 0xa4, 0x25, 0x32, 0x7c, 0xf6, 0x60, 0xc6, 0x0d, 0x6d, 0x92, 0xc7, 0xf3, 0xcd, 0x31, - 0x27, 0x30, 0x64, 0x95, 0x62, 0x07, 0x92, 0x04, 0x1c, 0x61, 0x19, 0xdf, 0x85, 0x02, 0x0f, 0x4a, - 0xb4, 0x6b, 0xd9, 0xe4, 0x3c, 0xce, 0x8c, 0xf0, 0x60, 0x35, 0x83, 0xff, 0x65, 0x0f, 0x56, 0x46, - 0x8c, 0xf0, 0xe0, 0xc7, 0x19, 0x98, 0xe2, 0xa9, 0xe3, 0x5c, 0x22, 0x93, 0x05, 0x53, 0xb4, 0x4b, - 0x6c, 0x99, 0x9b, 0xbf, 0x3c, 0xae, 0x89, 0xbe, 0x43, 0x1a, 0x5d, 0x62, 0xc7, 0xe9, 0x8a, 0xbf, - 0x61, 0xa1, 0x1a, 0xb9, 0x30, 0x4d, 0x85, 0x2b, 0x8b, 0x64, 0x35, 0xfe, 0x01, 0x12, 0x20, 0xe1, - 0x01, 0x52, 0xf5, 0x58, 0xf8, 0x8e, 0x25, 0x80, 0xd1, 0x81, 0x22, 0x97, 0x8a, 0x72, 0xf8, 0xe7, - 0x61, 0x8a, 0xf5, 0xbb, 0x51, 0xb2, 0x5d, 0x89, 0xe6, 0xb6, 0xd3, 0xef, 0x92, 0xe3, 0xc3, 0x95, - 0xf9, 0x84, 0x28, 0x27, 0x61, 0x21, 0x8c, 0x3e, 0x05, 0x33, 0x32, 0x49, 0xc9, 0xd8, 0xa0, 0xce, - 0x88, 0x94, 0xc5, 0x11, 0xdf, 0xf8, 0x25, 0x77, 0x51, 0xdf, 0x21, 0x35, 0xdf, 0x73, 0x5c, 0xe6, - 0xfa, 0x1e, 0x5a, 0x4b, 0x21, 0xfe, 0xff, 0x00, 0xe2, 0x42, 0x4a, 0x38, 0x81, 0xf9, 0x9a, 0x5a, - 0xa2, 0x4c, 0x6a, 0xa0, 0xb4, 0x8f, 0x4f, 0x56, 0x0d, 0x4b, 0x9b, 0xcc, 0x4b, 0xd5, 0x80, 0x58, - 0xd4, 0xf7, 0x06, 0x4b, 0x55, 0x2c, 0xa8, 0x58, 0x72, 0x8d, 0x3f, 0x6b, 0x20, 0x0a, 0x94, 0x73, - 0x39, 0x49, 0xef, 0xa6, 0x4f, 0xd2, 0xad, 0x09, 0x3c, 0x60, 0xc4, 0x21, 0xfa, 0x97, 0x34, 0x8f, - 0xfb, 0x1d, 0xdf, 0xc2, 0xae, 0xef, 0xd4, 0xea, 0x1b, 0x58, 0x6e, 0x84, 0xda, 0x42, 0x33, 0x24, - 0xe3, 0x88, 0xcf, 0x4b, 0x39, 0xf9, 0x48, 0xf5, 0x99, 0xd5, 0x6c, 0x54, 0xca, 0x49, 0x39, 0x8a, - 0x15, 0x17, 0x5d, 0x07, 0xe8, 0x06, 0xfe, 0x81, 0xeb, 0x88, 0xca, 0x32, 0xac, 0xbb, 0xd4, 0xd9, - 0x32, 0x15, 0x07, 0x27, 0xa4, 0x90, 0x03, 0xd3, 0xbc, 0xc6, 0x64, 0x54, 0xcf, 0x09, 0xc3, 0x5f, - 0x1f, 0xd3, 0xf0, 0x1d, 0xae, 
0x24, 0xde, 0x5a, 0xf1, 0x4a, 0xb1, 0xd4, 0x6d, 0xfc, 0x5b, 0x03, - 0x88, 0x0f, 0x07, 0x7a, 0x1f, 0xc0, 0x8e, 0x9c, 0x25, 0x4a, 0x5a, 0x1b, 0x13, 0xac, 0xb8, 0xf2, - 0xbc, 0xd8, 0x5c, 0x45, 0xa2, 0x38, 0x81, 0x85, 0x68, 0xb2, 0x96, 0xcc, 0x4d, 0xd4, 0x8a, 0x26, - 0xce, 0xe6, 0xb3, 0xeb, 0x48, 0xe3, 0x77, 0x19, 0xc8, 0x9a, 0xbe, 0x73, 0x2e, 0xd1, 0xf3, 0xdd, - 0x54, 0xf4, 0xbc, 0x33, 0x76, 0x65, 0xe0, 0x8c, 0x0c, 0x9e, 0xad, 0x81, 0xe0, 0x79, 0x77, 0x02, - 0x8c, 0x67, 0xc7, 0xce, 0x27, 0x59, 0x98, 0xe5, 0x6e, 0xaf, 0x62, 0xd9, 0x17, 0x52, 0xb1, 0x6c, - 0x75, 0x20, 0x96, 0xbd, 0x94, 0x94, 0x3d, 0x9b, 0x50, 0xd6, 0x87, 0xb9, 0xb6, 0x45, 0x99, 0x19, - 0xf8, 0xbb, 0x84, 0xf7, 0x9a, 0xd2, 0xe4, 0xc9, 0xfa, 0x55, 0x75, 0x55, 0xb0, 0x95, 0x54, 0x8d, - 0xd3, 0x48, 0xe8, 0x43, 0x0d, 0x10, 0xa7, 0xec, 0x04, 0x96, 0x47, 0x43, 0x93, 0x5c, 0xd9, 0xdc, - 0x4d, 0x3a, 0x81, 0x25, 0x39, 0x01, 0xb4, 0x75, 0x42, 0x3f, 0x1e, 0x82, 0x79, 0xda, 0x80, 0xce, - 0x83, 0x5c, 0x87, 0x50, 0x6a, 0x35, 0x89, 0x3e, 0x9d, 0x0e, 0x72, 0xdb, 0x21, 0x19, 0x47, 0x7c, - 0xe3, 0x0a, 0xe4, 0x4c, 0xdf, 0xa9, 0x9b, 0xcf, 0x2a, 0x8a, 0x8d, 0x3f, 0x69, 0xc0, 0xc3, 0xe3, - 0xb9, 0xe4, 0x87, 0x6f, 0xa6, 0xf3, 0xc3, 0xcd, 0xf1, 0x9d, 0x7c, 0x44, 0x7a, 0xf8, 0x55, 0x56, - 0x18, 0x27, 0xb2, 0xc3, 0x07, 0x1a, 0x94, 0x5c, 0xcf, 0x65, 0xea, 0x36, 0x80, 0xea, 0x2f, 0x4f, - 0x54, 0xe0, 0x29, 0x45, 0xd5, 0x57, 0x24, 0x78, 0xa9, 0x9e, 0xd2, 0x8f, 0x07, 0xf0, 0x10, 0x13, - 0x21, 0x3a, 0x42, 0xcf, 0x9c, 0x11, 0x7a, 0x32, 0x3c, 0x47, 0xc8, 0x09, 0x1c, 0xf4, 0x26, 0x20, - 0x4a, 0x82, 0x03, 0xd7, 0x26, 0xeb, 0xb6, 0xed, 0xf7, 0x3c, 0x26, 0x2e, 0x30, 0xc2, 0x3b, 0x12, - 0xe5, 0xa5, 0x8d, 0x13, 0x12, 0x78, 0xc8, 0x28, 0xde, 0x42, 0xa9, 0x2b, 0x10, 0x48, 0xb7, 0x50, - 0x27, 0xaf, 0x41, 0xd0, 0x1a, 0x14, 0x79, 0x3b, 0xf5, 0x80, 0xb0, 0x87, 0x7e, 0xb0, 0xaf, 0x17, - 0x57, 0xb5, 0xab, 0xf9, 0xf8, 0x5a, 0xe6, 0x7e, 0xcc, 0xc2, 0x49, 0x39, 0xe3, 0x17, 0x39, 0x28, - 0xa8, 0xc0, 0x85, 0x2a, 0x90, 0xeb, 0xb6, 0x2c, 0x1a, 0x05, 0xa4, 
0x4b, 0xaa, 0x89, 0xe2, 0xc4, - 0xe3, 0x30, 0x69, 0x8b, 0x67, 0x1c, 0xca, 0xa1, 0x87, 0xa9, 0x44, 0x98, 0x99, 0xe8, 0x9a, 0x21, - 0x19, 0xed, 0x9e, 0x9b, 0x07, 0x4f, 0x79, 0x7d, 0x88, 0xae, 0xf0, 0xce, 0xd2, 0xa9, 0x9b, 0xf2, - 0x00, 0x27, 0xda, 0x42, 0xa7, 0x6e, 0xe2, 0x90, 0xc7, 0x6b, 0x08, 0xf1, 0x40, 0xf5, 0xd9, 0x89, - 0x6a, 0x08, 0xa1, 0x34, 0x9e, 0x8a, 0x78, 0xa5, 0x58, 0xea, 0x46, 0xae, 0xbc, 0x27, 0x14, 0x61, - 0x6f, 0xe6, 0x0c, 0xc2, 0xde, 0x9c, 0xba, 0x23, 0x14, 0x91, 0x2e, 0xd6, 0x8e, 0x7e, 0xaa, 0xc1, - 0x82, 0x9d, 0xbe, 0x23, 0x24, 0x54, 0xcf, 0x4f, 0x74, 0xf5, 0x34, 0x70, 0xe7, 0xa8, 0x9c, 0x63, - 0xa1, 0x36, 0x08, 0x84, 0x4f, 0x62, 0xa3, 0x5b, 0x90, 0xff, 0x96, 0x4f, 0x6b, 0x6d, 0x8b, 0x52, - 0xbd, 0x90, 0xea, 0x15, 0xf2, 0x5f, 0x79, 0xbb, 0x21, 0xe8, 0xc7, 0x87, 0x2b, 0x45, 0xd3, 0x77, - 0xa2, 0x57, 0xac, 0x06, 0x18, 0x3f, 0xd2, 0x00, 0xe2, 0xde, 0x5e, 0x5d, 0xdf, 0x69, 0xa7, 0xba, - 0xbe, 0xcb, 0xbc, 0xd0, 0xb5, 0xf4, 0x0a, 0xe4, 0x48, 0x10, 0xf8, 0x81, 0xac, 0x3e, 0x0b, 0xdc, - 0x57, 0x36, 0x39, 0x01, 0x87, 0x74, 0xe3, 0x0f, 0x53, 0x30, 0xdd, 0x20, 0x76, 0x40, 0xd8, 0xb9, - 0x94, 0x43, 0x9f, 0x81, 0x82, 0xdb, 0xe9, 0xf4, 0x98, 0xb5, 0xdb, 0x26, 0xc2, 0xf5, 0xf3, 0xa1, - 0x1b, 0xd4, 0x23, 0x22, 0x8e, 0xf9, 0x28, 0x80, 0x29, 0x31, 0xb9, 0xf0, 0x5c, 0xbe, 0x31, 0xe6, - 0xc6, 0x87, 0xd6, 0x96, 0x37, 0x2c, 0x66, 0x6d, 0x7a, 0x2c, 0xe8, 0xab, 0x7c, 0x3f, 0xc5, 0x49, - 0x3f, 0xfe, 0xcb, 0x4a, 0xae, 0xda, 0x67, 0x84, 0x62, 0x81, 0x85, 0xbe, 0xa7, 0x01, 0x50, 0x16, - 0xb8, 0x5e, 0x93, 0x73, 0x65, 0x6d, 0xbc, 0x3d, 0x19, 0x74, 0x43, 0xe9, 0x0b, 0x27, 0xa0, 0x96, - 0x28, 0x66, 0xe0, 0x04, 0x28, 0x2a, 0xcb, 0xb2, 0x2a, 0x9b, 0x8a, 0xbb, 0x51, 0x59, 0x05, 0xa1, - 0xd6, 0xb8, 0xa0, 0x5a, 0xfa, 0x12, 0x14, 0x94, 0x72, 0xf4, 0x12, 0x64, 0xf7, 0x49, 0x3f, 0x8c, - 0x80, 0x98, 0x3f, 0xa2, 0x97, 0x21, 0x77, 0x60, 0xb5, 0x7b, 0xe1, 0x45, 0xd6, 0x2c, 0x0e, 0x5f, - 0x6e, 0x66, 0x6e, 0x68, 0x4b, 0xb7, 0x61, 0x7e, 0x60, 0x6e, 0xcf, 0x1b, 0x5e, 0x48, 0x0c, 0x37, - 0x3e, 
0xd6, 0x40, 0x4e, 0xe6, 0x5c, 0x4a, 0x82, 0xdd, 0x74, 0x49, 0x70, 0x7b, 0xa2, 0x4d, 0x1a, - 0x51, 0x15, 0xfc, 0x31, 0x03, 0x33, 0x32, 0xdf, 0x9d, 0xcb, 0x79, 0x71, 0x52, 0xed, 0x43, 0x75, - 0x6c, 0x13, 0x85, 0x05, 0x23, 0x5b, 0x88, 0xf6, 0x40, 0x0b, 0xb1, 0x31, 0x21, 0xce, 0xb3, 0xdb, - 0x88, 0x23, 0x0d, 0x8a, 0x52, 0xf2, 0x5c, 0xfc, 0xc6, 0x4e, 0xfb, 0xcd, 0x9d, 0xc9, 0x8c, 0x1d, - 0xe1, 0x38, 0xbf, 0x89, 0x8d, 0x3c, 0xe5, 0xbf, 0x3a, 0xe3, 0x07, 0xfd, 0x28, 0xa1, 0x64, 0x47, - 0x26, 0x14, 0x59, 0x8b, 0x89, 0xff, 0x47, 0x73, 0xe9, 0xbf, 0x54, 0x1f, 0x48, 0x3a, 0x56, 0x12, - 0xc6, 0xa3, 0xa2, 0x9a, 0xbb, 0x28, 0x87, 0x9b, 0xd1, 0xf5, 0xb6, 0x36, 0x51, 0xc3, 0x9e, 0x58, - 0x8e, 0x11, 0xff, 0x55, 0x7f, 0x07, 0xf2, 0x94, 0xb4, 0x89, 0xcd, 0xfc, 0x40, 0x6e, 0x8e, 0x39, - 0xb9, 0xc7, 0x97, 0x1b, 0x52, 0x65, 0x18, 0x7c, 0x95, 0xe1, 0x11, 0x19, 0x2b, 0x4c, 0x54, 0x81, - 0x82, 0xdd, 0xee, 0x51, 0x46, 0x82, 0xba, 0x29, 0xa3, 0xaf, 0xba, 0x59, 0xa8, 0x45, 0x0c, 0x1c, - 0xcb, 0xa0, 0x32, 0x80, 0x7a, 0xa1, 0x3a, 0x12, 0xb7, 0x43, 0x25, 0x51, 0xf6, 0x29, 0x2a, 0x4e, - 0x48, 0xa0, 0x8a, 0x8c, 0xec, 0xe1, 0x5f, 0x82, 0xff, 0x37, 0x10, 0xd9, 0xa3, 0x45, 0x4f, 0xf4, - 0xca, 0xd7, 0xa0, 0x48, 0xde, 0x67, 0x24, 0xf0, 0xac, 0x36, 0x47, 0xc8, 0x09, 0x84, 0x79, 0x5e, - 0x12, 0x6f, 0xc6, 0x64, 0x9c, 0x94, 0x41, 0x3b, 0x30, 0x4f, 0x09, 0xa5, 0xae, 0xef, 0xad, 0xef, - 0xed, 0xf1, 0xae, 0xa2, 0x2f, 0xaa, 0xb5, 0x42, 0xf5, 0xd3, 0x12, 0x6e, 0xbe, 0x91, 0x66, 0x1f, - 0x0b, 0x52, 0x58, 0xbf, 0x4b, 0x12, 0x1e, 0x54, 0x81, 0xee, 0x40, 0xa9, 0x9d, 0xfc, 0x77, 0xc0, - 0x94, 0x5d, 0x81, 0xea, 0x67, 0x52, 0xff, 0x1d, 0x98, 0x78, 0x40, 0x1a, 0x7d, 0x0d, 0xf4, 0x24, - 0xa5, 0xe1, 0xf7, 0x02, 0x9b, 0x60, 0xcb, 0x6b, 0x92, 0xf0, 0x93, 0x84, 0x42, 0xf5, 0xf2, 0xd1, - 0xe1, 0x8a, 0xbe, 0x35, 0x42, 0x06, 0x8f, 0x1c, 0x8d, 0x28, 0x2c, 0x46, 0xe6, 0xef, 0x04, 0xd6, - 0xde, 0x9e, 0x6b, 0x9b, 0x7e, 0xdb, 0xb5, 0xfb, 0xa2, 0x87, 0x28, 0x54, 0x6f, 0xcb, 0x09, 0x2e, - 0x6e, 0x0e, 0x13, 0x3a, 0x3e, 0x5c, 0xb9, 
0x2c, 0x6d, 0x1f, 0xca, 0xc7, 0xc3, 0x75, 0xa3, 0x6d, - 0xb8, 0xd8, 0x22, 0x56, 0x9b, 0xb5, 0x6a, 0x2d, 0x62, 0xef, 0x47, 0x67, 0x48, 0x9f, 0x15, 0x67, - 0x2b, 0xda, 0xd7, 0x8b, 0xf7, 0x4f, 0x8a, 0xe0, 0x61, 0xe3, 0xd0, 0xcf, 0x35, 0x58, 0x1c, 0x58, - 0xf1, 0xf0, 0xd3, 0x15, 0xbd, 0x34, 0xd1, 0x17, 0x02, 0x8d, 0x61, 0x3a, 0xab, 0x97, 0xf8, 0x72, - 0x0c, 0x65, 0xe1, 0xe1, 0xb3, 0x40, 0x37, 0x01, 0xdc, 0xee, 0x3d, 0xab, 0xe3, 0xb6, 0x5d, 0x42, - 0xf5, 0x8b, 0x62, 0xbf, 0x96, 0xb8, 0x9f, 0xd7, 0xcd, 0x88, 0xca, 0x63, 0x93, 0x7c, 0xeb, 0xe3, - 0x84, 0x34, 0xda, 0x82, 0x92, 0x7c, 0xeb, 0xcb, 0x8d, 0x59, 0x10, 0x1b, 0xf3, 0x09, 0xd1, 0x05, - 0x9b, 0x49, 0xce, 0xf1, 0x09, 0x0a, 0x1e, 0x18, 0x8b, 0x6a, 0xb0, 0x90, 0xf4, 0x84, 0xb0, 0x22, - 0x5f, 0x14, 0x0a, 0x17, 0x79, 0x35, 0xbf, 0x35, 0xc8, 0xc4, 0x27, 0xe5, 0x91, 0x0f, 0x8b, 0xae, - 0x37, 0xcc, 0x65, 0x5e, 0x11, 0x8a, 0x5e, 0xe3, 0xeb, 0x53, 0xf7, 0x9e, 0xed, 0x2e, 0x43, 0xf9, - 0x78, 0xb8, 0xde, 0xa5, 0x5b, 0x30, 0x97, 0x8a, 0x42, 0x2f, 0x54, 0x66, 0x3d, 0xca, 0xf0, 0xd1, - 0x89, 0xcc, 0x8a, 0xbe, 0xaf, 0xc1, 0x6c, 0xd2, 0x2a, 0x99, 0x36, 0xeb, 0x67, 0xf0, 0xb7, 0x9f, - 0xcc, 0xdd, 0xea, 0xdb, 0x9e, 0x24, 0x0f, 0xa7, 0x40, 0x51, 0x6f, 0x48, 0xf3, 0xbc, 0x3e, 0x6e, - 0xe6, 0x3e, 0x75, 0xeb, 0x6c, 0x7c, 0xa8, 0xc1, 0x70, 0xe7, 0x45, 0x3e, 0xe4, 0x6d, 0xf9, 0xe1, - 0x97, 0x5c, 0x91, 0xb1, 0xbf, 0x24, 0x49, 0x7d, 0x3f, 0x16, 0x5e, 0xf8, 0x47, 0x34, 0xac, 0x40, - 0x8c, 0xbf, 0x6b, 0x90, 0x13, 0x37, 0xed, 0xe8, 0xd5, 0xc4, 0x7e, 0x56, 0x8b, 0xd2, 0x82, 0xec, - 0x5b, 0xa4, 0x1f, 0x6e, 0xee, 0x95, 0xd4, 0xe6, 0xc6, 0xd9, 0xef, 0x1d, 0x4e, 0x94, 0x7b, 0x8d, - 0xd6, 0x60, 0x9a, 0xec, 0xed, 0x11, 0x9b, 0xc9, 0xd4, 0xf3, 0x6a, 0x54, 0x3f, 0x6d, 0x0a, 0x2a, - 0x4f, 0x10, 0x02, 0x2c, 0x7c, 0xc5, 0x52, 0x98, 0xf7, 0xe5, 0xcc, 0xed, 0x90, 0x75, 0xc7, 0x21, - 0xce, 0x99, 0x5c, 0x47, 0x8a, 0x86, 0x6c, 0x27, 0x52, 0x89, 0x63, 0xed, 0xbc, 0x91, 0xbd, 0xc4, - 0x93, 0x93, 0xb3, 0xe5, 0xdb, 0x56, 0x3b, 0x2c, 0x58, 0x31, 0xd9, 0x23, 0x01, 
0xf1, 0x6c, 0x82, - 0xae, 0x42, 0xde, 0xea, 0xba, 0x6f, 0x04, 0x7e, 0x2f, 0xba, 0x40, 0x14, 0xeb, 0xb6, 0x6e, 0xd6, - 0x05, 0x0d, 0x2b, 0x2e, 0x2f, 0x58, 0xf6, 0x5d, 0xcf, 0x91, 0xab, 0xa1, 0x0a, 0x96, 0xb7, 0x5c, - 0xcf, 0xc1, 0x82, 0xa3, 0xca, 0xa5, 0xec, 0xa8, 0x72, 0xc9, 0xb8, 0x03, 0xc5, 0xc4, 0x77, 0x67, - 0x3c, 0x75, 0x77, 0xf8, 0x83, 0x69, 0xb1, 0xd6, 0x60, 0xea, 0xde, 0x8e, 0x18, 0x38, 0x96, 0xa9, - 0x7e, 0xe3, 0xf1, 0xd3, 0xe5, 0x0b, 0x1f, 0x3d, 0x5d, 0xbe, 0xf0, 0xe4, 0xe9, 0xf2, 0x85, 0x0f, - 0x8e, 0x96, 0xb5, 0xc7, 0x47, 0xcb, 0xda, 0x47, 0x47, 0xcb, 0xda, 0x93, 0xa3, 0x65, 0xed, 0xe3, - 0xa3, 0x65, 0xed, 0x67, 0x7f, 0x5d, 0xbe, 0xf0, 0xf5, 0xb5, 0xb1, 0xbe, 0xd4, 0xfc, 0x6f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x3f, 0xf4, 0xc9, 0x6c, 0xe1, 0x29, 0x00, 0x00, + 0x6f, 0x0f, 0x42, 0xc3, 0x2f, 0x47, 0x33, 0x10, 0x98, 0x9d, 0xd9, 0x19, 0x26, 0x76, 0x32, 0x3b, + 0xde, 0x4d, 0x66, 0x4d, 0x39, 0x5a, 0x10, 0x1c, 0xd8, 0x4e, 0x77, 0xc5, 0xe9, 0x8d, 0xdd, 0x6d, + 0xba, 0xca, 0x99, 0xb5, 0x84, 0x60, 0x57, 0x08, 0xb4, 0xc0, 0x48, 0xf0, 0x0f, 0x70, 0xe1, 0xc6, + 0x19, 0x2e, 0x88, 0x33, 0xd2, 0x70, 0x5b, 0x6e, 0x23, 0x84, 0xc2, 0x4e, 0x90, 0xb8, 0x71, 0x40, + 0x9c, 0x08, 0x12, 0x42, 0x55, 0x5d, 0x55, 0xdd, 0xed, 0xd8, 0x33, 0x19, 0x3b, 0x52, 0xc4, 0xc9, + 0xdd, 0xef, 0xbd, 0x7a, 0x9f, 0x7a, 0x55, 0xaf, 0xde, 0x8f, 0x72, 0xc3, 0x46, 0xd3, 0x63, 0x7b, + 0xdd, 0x9d, 0xb2, 0x13, 0xb4, 0x57, 0x1c, 0xaf, 0xe5, 0x75, 0xf5, 0x4f, 0x67, 0xbf, 0xb9, 0xb2, + 0x7f, 0x83, 0xae, 0xd0, 0x96, 0xd7, 0x16, 0x0f, 0x76, 0xc7, 0x5b, 0x71, 0x82, 0x90, 0xac, 0x1c, + 0x5c, 0x5b, 0x69, 0x12, 0x9f, 0x84, 0x36, 0x23, 0x6e, 0xb9, 0x13, 0x06, 0x2c, 0x40, 0xab, 0xb1, + 0x9a, 0x72, 0x34, 0x5e, 0xfd, 0x74, 0xf6, 0x9b, 0xe5, 0xfd, 0x1b, 0xb4, 0xcc, 0xd5, 0x88, 0x07, + 0xbb, 0xe3, 0x95, 0xb9, 0x9a, 0xf2, 0xc1, 0xb5, 0x85, 0x7b, 0x2f, 0x84, 0x4e, 0x57, 0xda, 0x84, + 0xd9, 0x03, 0xe0, 0x17, 0xbe, 0x90, 0xd0, 0xd3, 0x0c, 0x9a, 0xc1, 0x8a, 0x20, 0xef, 0x74, 0x77, + 0xc5, 0x9b, 0x78, 0x11, 0x4f, 0x52, 0x9c, 
0x2b, 0x2c, 0x7b, 0x01, 0xd7, 0xd9, 0xb6, 0x9d, 0x3d, + 0xcf, 0x27, 0x61, 0x4f, 0x20, 0x86, 0x5d, 0x9f, 0x79, 0x6d, 0x72, 0x42, 0xff, 0x97, 0x9f, 0x37, + 0x80, 0x3a, 0x7b, 0xa4, 0x6d, 0xf7, 0x8f, 0xb3, 0x36, 0xa1, 0x54, 0x6d, 0x79, 0xc4, 0x67, 0xb5, + 0x7a, 0x35, 0xf0, 0x77, 0xbd, 0x26, 0xba, 0x09, 0x25, 0x3e, 0x20, 0xe8, 0xb2, 0x06, 0x71, 0x02, + 0xdf, 0xa5, 0xa6, 0xb1, 0x6c, 0x5c, 0xcd, 0x55, 0xd0, 0xd1, 0xe1, 0x52, 0x69, 0x3b, 0xc5, 0xc1, + 0x7d, 0x92, 0xd6, 0xef, 0x32, 0x50, 0xa8, 0x06, 0x3e, 0xb3, 0x39, 0x3e, 0x5a, 0x86, 0x09, 0xdf, + 0x6e, 0x13, 0x31, 0xbe, 0x50, 0x99, 0x7e, 0x7c, 0xb8, 0x74, 0xe1, 0xe8, 0x70, 0x69, 0xe2, 0x81, + 0xdd, 0x26, 0x58, 0x70, 0xd0, 0x15, 0xc8, 0x79, 0x6d, 0xbb, 0x49, 0xcc, 0x8c, 0x10, 0x99, 0x91, + 0x22, 0xb9, 0x1a, 0x27, 0xe2, 0x88, 0x87, 0x3c, 0xc8, 0x75, 0x82, 0x90, 0x51, 0x73, 0x72, 0x39, + 0x7b, 0xb5, 0x78, 0x7d, 0xbd, 0x3c, 0xd2, 0x4e, 0x96, 0xf5, 0xbc, 0xea, 0x41, 0xc8, 0x62, 0x28, + 0xfe, 0x46, 0x71, 0x84, 0x80, 0xbe, 0x0b, 0xd3, 0x07, 0x41, 0xab, 0xdb, 0x26, 0x5b, 0x41, 0xd7, + 0x67, 0xd4, 0x2c, 0x08, 0xc4, 0xca, 0x88, 0x88, 0xef, 0xc4, 0xaa, 0x2a, 0x2f, 0x4b, 0xbc, 0xe9, + 0x04, 0x91, 0xe2, 0x14, 0x9a, 0xf5, 0x1f, 0x03, 0x66, 0x52, 0xb3, 0x3c, 0xc5, 0x0a, 0x7e, 0x1e, + 0xf2, 0x7b, 0x01, 0x65, 0x5c, 0x5a, 0x2c, 0x62, 0xae, 0xf2, 0x92, 0x94, 0xca, 0xdf, 0x97, 0x74, + 0xac, 0x25, 0xd0, 0x2d, 0x98, 0x71, 0x92, 0x00, 0x66, 0x56, 0x0c, 0x99, 0x97, 0x43, 0xd2, 0xe8, + 0x38, 0x2d, 0x8b, 0x6e, 0x40, 0x5e, 0xf8, 0x8c, 0x13, 0xb4, 0xcc, 0x09, 0x31, 0xa1, 0xcb, 0x0a, + 0xaa, 0x2e, 0xe9, 0xc7, 0x89, 0x67, 0xac, 0xa5, 0xd1, 0xa7, 0x61, 0x92, 0x4f, 0xa1, 0x56, 0x37, + 0x73, 0x62, 0x5c, 0x49, 0x8e, 0x9b, 0xbc, 0x2f, 0xa8, 0x58, 0x72, 0xad, 0x1f, 0x1a, 0x50, 0xd2, + 0x53, 0x68, 0x30, 0x9b, 0x11, 0x44, 0x61, 0x2a, 0xec, 0xfa, 0xbe, 0xe7, 0x37, 0x85, 0x79, 0xc5, + 0xeb, 0x9b, 0xe3, 0x6e, 0xbf, 0xd0, 0x8b, 0x23, 0x9d, 0x95, 0xe2, 0xd1, 0xe1, 0xd2, 0x94, 0x7c, + 0xc1, 0x0a, 0xc9, 0xfa, 0x91, 0x01, 0xf3, 0x03, 0xe5, 0x51, 0x1b, 0x0a, 0x94, 
0xd9, 0x21, 0x23, + 0xee, 0x1a, 0x13, 0xbb, 0x52, 0xbc, 0xfe, 0xfa, 0x8b, 0x4d, 0x88, 0x96, 0x79, 0x88, 0xe0, 0x33, + 0xe2, 0x67, 0xa9, 0x32, 0x27, 0x97, 0xa2, 0xd0, 0x50, 0x6a, 0x71, 0x8c, 0x60, 0xfd, 0xc6, 0x80, + 0xd9, 0xd4, 0x44, 0xba, 0x14, 0xbd, 0x07, 0x39, 0xca, 0xa7, 0x24, 0xd7, 0x63, 0xe3, 0x4c, 0xd6, + 0x23, 0x3e, 0x0f, 0x91, 0xb9, 0x11, 0x04, 0x5a, 0x85, 0xa2, 0xf6, 0x81, 0xda, 0xba, 0x99, 0x17, + 0xbb, 0x77, 0x51, 0x8a, 0x16, 0xab, 0x31, 0x0b, 0x27, 0xe5, 0xac, 0xaf, 0xc3, 0xec, 0x86, 0xef, + 0x76, 0x02, 0xcf, 0x67, 0x6b, 0xae, 0x1b, 0x12, 0x4a, 0xd1, 0x02, 0x64, 0xbc, 0x8e, 0xf4, 0x63, + 0x90, 0x0a, 0x32, 0xb5, 0x3a, 0xce, 0x78, 0x1d, 0x74, 0x15, 0xf2, 0x7e, 0xe0, 0x12, 0xee, 0xd5, + 0xd2, 0xb1, 0xa6, 0xb9, 0x53, 0x3d, 0x90, 0x34, 0xac, 0xb9, 0xd6, 0x23, 0x03, 0xa6, 0x95, 0xe6, + 0x53, 0x1e, 0x90, 0x65, 0x98, 0xe8, 0xc4, 0x87, 0x43, 0x4b, 0x08, 0x07, 0x17, 0x9c, 0x94, 0x5f, + 0x67, 0x5f, 0xc4, 0xaf, 0xad, 0x7f, 0x1a, 0x50, 0x52, 0xd3, 0x69, 0x74, 0x77, 0x28, 0x61, 0xe8, + 0x21, 0x14, 0xec, 0xc8, 0x64, 0xc2, 0x03, 0x27, 0x0f, 0x1f, 0xf7, 0x46, 0xdc, 0xa1, 0xbe, 0x25, + 0x8c, 0x5d, 0x65, 0x4d, 0x01, 0xe0, 0x18, 0x0b, 0xed, 0xa9, 0x28, 0x99, 0x15, 0xa0, 0xd5, 0x31, + 0x41, 0x87, 0x07, 0x49, 0xeb, 0x1f, 0x06, 0x14, 0x94, 0x18, 0x45, 0x21, 0xe4, 0xb9, 0x43, 0xbb, + 0x36, 0xb3, 0xe5, 0x81, 0xa8, 0x8c, 0x7a, 0x20, 0xde, 0xde, 0x79, 0x8f, 0x38, 0x6c, 0x8b, 0x30, + 0xbb, 0x82, 0x24, 0x32, 0xc4, 0x34, 0xac, 0x71, 0x50, 0x07, 0xa6, 0xa8, 0x58, 0x6e, 0x6a, 0x66, + 0x84, 0xb5, 0x1b, 0x63, 0x5a, 0x1b, 0x6d, 0x5e, 0x65, 0x56, 0xa2, 0x4e, 0x45, 0xef, 0x14, 0x2b, + 0x18, 0xeb, 0x6f, 0x06, 0xcc, 0x68, 0x9b, 0x37, 0x3d, 0xca, 0x90, 0x7f, 0xc2, 0xee, 0xbb, 0xa3, + 0xda, 0xcd, 0xf5, 0x09, 0xab, 0x75, 0xe8, 0x56, 0x94, 0x84, 0xcd, 0x04, 0x72, 0x1e, 0x23, 0x6d, + 0x65, 0xf1, 0xdd, 0x31, 0x2d, 0xa6, 0x89, 0x64, 0xcb, 0xd5, 0xe2, 0x48, 0xbb, 0xf5, 0x61, 0x06, + 0x2e, 0x6e, 0x06, 0xb6, 0x5b, 0xb1, 0x5b, 0xb6, 0xef, 0x90, 0xb0, 0xe6, 0x37, 0x9f, 0x7b, 0x7e, + 0x65, 0x0e, 0x12, 
0x07, 0x31, 0x4a, 0xe4, 0xa9, 0x1c, 0xe4, 0x8b, 0x33, 0xac, 0x24, 0xd0, 0x0d, + 0x98, 0xf4, 0x3a, 0x5b, 0x81, 0x4b, 0xe4, 0x61, 0x5b, 0xe6, 0x89, 0xa0, 0x56, 0xe7, 0x94, 0xe3, + 0xc3, 0x25, 0x94, 0x02, 0x17, 0x54, 0x2c, 0xe5, 0xd1, 0xae, 0x72, 0xf1, 0x09, 0xb1, 0x04, 0x6b, + 0x23, 0x2e, 0x01, 0x77, 0xe6, 0x28, 0x96, 0x0e, 0x71, 0xf0, 0x9f, 0x1a, 0x90, 0x9a, 0x86, 0x0c, + 0xbc, 0x5d, 0x98, 0xf2, 0xa2, 0xd5, 0x90, 0x07, 0xfb, 0xcd, 0x11, 0x27, 0x30, 0x60, 0x7d, 0x63, + 0xd7, 0x93, 0x04, 0xac, 0xb0, 0xac, 0xef, 0x43, 0x81, 0x87, 0x33, 0xda, 0xb1, 0x1d, 0x72, 0x1e, + 0xa7, 0x4d, 0xf8, 0xbe, 0x9e, 0xc1, 0xff, 0xb2, 0xef, 0x6b, 0x23, 0x86, 0xf8, 0xfe, 0xe3, 0x0c, + 0x4c, 0xf0, 0xa4, 0x73, 0x2e, 0x31, 0xcd, 0x86, 0x09, 0xda, 0x21, 0x8e, 0xcc, 0xea, 0x5f, 0x1d, + 0xd5, 0xc4, 0xc0, 0x25, 0x8d, 0x0e, 0x71, 0xe2, 0x44, 0xc7, 0xdf, 0xb0, 0x50, 0x8d, 0x3c, 0x98, + 0xa4, 0xc2, 0x95, 0xc5, 0xc9, 0x1b, 0xfd, 0x00, 0x09, 0x90, 0xe8, 0x00, 0xe9, 0x4a, 0x2e, 0x7a, + 0xc7, 0x12, 0xc0, 0x6a, 0x43, 0x91, 0x4b, 0xa9, 0xec, 0xff, 0x45, 0x98, 0x60, 0xbd, 0x8e, 0x4a, + 0xd3, 0x4b, 0x6a, 0x6e, 0xdb, 0xbd, 0x0e, 0x3f, 0xf3, 0xb3, 0x09, 0x51, 0x4e, 0xc2, 0x42, 0x18, + 0x7d, 0x06, 0xa6, 0x64, 0x7a, 0x93, 0x51, 0x45, 0x9f, 0x11, 0x29, 0x8b, 0x15, 0xdf, 0xfa, 0x15, + 0x77, 0xd1, 0xc0, 0x25, 0xd5, 0xc0, 0x77, 0x3d, 0xe6, 0x05, 0x3e, 0x5a, 0x4d, 0x21, 0xfe, 0x7f, + 0x1f, 0xe2, 0x5c, 0x4a, 0x38, 0x81, 0xf9, 0x9a, 0x5e, 0xa2, 0x4c, 0x6a, 0xa0, 0xb4, 0x8f, 0x4f, + 0x56, 0x0f, 0x4b, 0x9b, 0xcc, 0x8b, 0xdc, 0x90, 0xd8, 0x34, 0xf0, 0xfb, 0x8b, 0x5c, 0x2c, 0xa8, + 0x58, 0x72, 0xad, 0x3f, 0x1b, 0x20, 0x4a, 0x9b, 0x73, 0x39, 0x49, 0xef, 0xa6, 0x4f, 0xd2, 0xad, + 0x31, 0x3c, 0x60, 0xc8, 0x21, 0xfa, 0x97, 0x34, 0x8f, 0xfb, 0x1d, 0xdf, 0xc2, 0x4e, 0xe0, 0x56, + 0x6b, 0xeb, 0x58, 0x6e, 0x84, 0xde, 0xc2, 0x7a, 0x44, 0xc6, 0x8a, 0xcf, 0x8b, 0x40, 0xf9, 0x48, + 0xcd, 0xa9, 0xe5, 0xac, 0x2a, 0x02, 0xa5, 0x1c, 0xc5, 0x9a, 0x8b, 0xae, 0x03, 0x74, 0xc2, 0xe0, + 0xc0, 0x73, 0x45, 0x4d, 0x1a, 0x25, 0x11, 0x7d, 0xb6, 
0xea, 0x9a, 0x83, 0x13, 0x52, 0xc8, 0x85, + 0x49, 0x5e, 0x9d, 0x32, 0x6a, 0xe6, 0x84, 0xe1, 0xaf, 0x8f, 0x68, 0xf8, 0x36, 0x57, 0x12, 0x6f, + 0xad, 0x78, 0xa5, 0x58, 0xea, 0xb6, 0xfe, 0x6d, 0x00, 0xc4, 0x87, 0x03, 0xbd, 0x0f, 0xe0, 0x28, + 0x67, 0x51, 0x49, 0x6b, 0x7d, 0x8c, 0x15, 0xd7, 0x9e, 0x17, 0x9b, 0xab, 0x49, 0x14, 0x27, 0xb0, + 0x10, 0x4d, 0x56, 0xa1, 0xb9, 0xb1, 0x9a, 0xd8, 0xc4, 0xd9, 0x7c, 0x76, 0x05, 0x6a, 0xfd, 0x3e, + 0x03, 0xd9, 0x7a, 0xe0, 0x9e, 0x4b, 0xf4, 0x7c, 0x37, 0x15, 0x3d, 0xef, 0x8c, 0x5c, 0x19, 0xb8, + 0x43, 0x83, 0xe7, 0x5e, 0x5f, 0xf0, 0xbc, 0x3b, 0x06, 0xc6, 0xb3, 0x63, 0xe7, 0x93, 0x2c, 0x4c, + 0x73, 0xb7, 0xd7, 0xb1, 0xec, 0x4b, 0xa9, 0x58, 0xb6, 0xdc, 0x17, 0xcb, 0x5e, 0x4a, 0xca, 0x9e, + 0x4d, 0x28, 0xeb, 0xc1, 0x4c, 0xcb, 0xa6, 0xac, 0x1e, 0x06, 0x3b, 0x84, 0x77, 0xa9, 0xd2, 0xe4, + 0xf1, 0x3a, 0x5d, 0x7d, 0xc9, 0xb0, 0x99, 0x54, 0x8d, 0xd3, 0x48, 0xe8, 0x23, 0x03, 0x10, 0xa7, + 0x6c, 0x87, 0xb6, 0x4f, 0x23, 0x93, 0x3c, 0xd9, 0x16, 0x8e, 0x3b, 0x81, 0x05, 0x39, 0x01, 0xb4, + 0x79, 0x42, 0x3f, 0x1e, 0x80, 0x79, 0xda, 0x80, 0xce, 0x83, 0x5c, 0x9b, 0x50, 0x6a, 0x37, 0x89, + 0x39, 0x99, 0x0e, 0x72, 0x5b, 0x11, 0x19, 0x2b, 0xbe, 0x75, 0x05, 0x72, 0xf5, 0xc0, 0xad, 0xd5, + 0x9f, 0x55, 0x4e, 0x5b, 0x7f, 0x32, 0x80, 0x87, 0xc7, 0x73, 0xc9, 0x0f, 0xdf, 0x4e, 0xe7, 0x87, + 0x9b, 0xa3, 0x3b, 0xf9, 0x90, 0xf4, 0xf0, 0xeb, 0xac, 0x30, 0x4e, 0x64, 0x87, 0x0f, 0x0c, 0x28, + 0x79, 0xbe, 0xc7, 0xf4, 0x3d, 0x02, 0x35, 0x5f, 0x1e, 0xab, 0xc0, 0xd3, 0x8a, 0x2a, 0xaf, 0x48, + 0xf0, 0x52, 0x2d, 0xa5, 0x1f, 0xf7, 0xe1, 0x21, 0x26, 0x42, 0xb4, 0x42, 0xcf, 0x9c, 0x11, 0x7a, + 0x32, 0x3c, 0x2b, 0xe4, 0x04, 0x0e, 0x7a, 0x13, 0x10, 0x25, 0xe1, 0x81, 0xe7, 0x90, 0x35, 0xc7, + 0x09, 0xba, 0x3e, 0x13, 0x57, 0x1f, 0xd1, 0xed, 0x8a, 0xf6, 0xd2, 0xc6, 0x09, 0x09, 0x3c, 0x60, + 0x14, 0x6f, 0xbe, 0xf4, 0xe5, 0x09, 0xa4, 0x9b, 0xaf, 0x93, 0x17, 0x28, 0x68, 0x15, 0x8a, 0xbc, + 0x11, 0x7b, 0x40, 0xd8, 0xc3, 0x20, 0xdc, 0x37, 0x8b, 0xcb, 0xc6, 0xd5, 0x7c, 0x7c, 0xa1, 
0x73, + 0x3f, 0x66, 0xe1, 0xa4, 0x9c, 0xf5, 0xcb, 0x1c, 0x14, 0x74, 0xe0, 0x42, 0x2b, 0x90, 0xeb, 0xec, + 0xd9, 0x54, 0x05, 0xa4, 0x4b, 0xba, 0x89, 0xe2, 0xc4, 0xe3, 0x28, 0x69, 0x8b, 0x67, 0x1c, 0xc9, + 0xa1, 0x87, 0xa9, 0x44, 0x98, 0x19, 0xeb, 0x82, 0x22, 0x19, 0xed, 0x9e, 0x9b, 0x07, 0x4f, 0x79, + 0xf1, 0x88, 0xae, 0xf0, 0xce, 0xd2, 0xad, 0xd5, 0xe5, 0x01, 0x4e, 0xb4, 0x85, 0x6e, 0xad, 0x8e, + 0x23, 0x1e, 0xaf, 0x21, 0xc4, 0x03, 0x35, 0xa7, 0xc7, 0xaa, 0x21, 0x84, 0xd2, 0x78, 0x2a, 0xe2, + 0x95, 0x62, 0xa9, 0x1b, 0x79, 0xf2, 0x86, 0x51, 0x84, 0xbd, 0xa9, 0x33, 0x08, 0x7b, 0x33, 0xfa, + 0x76, 0x51, 0x44, 0xba, 0x58, 0x3b, 0xfa, 0x99, 0x01, 0x73, 0x4e, 0xfa, 0x76, 0x91, 0x50, 0x33, + 0x3f, 0xd6, 0xa5, 0x55, 0xdf, 0x6d, 0xa5, 0x76, 0x8e, 0xb9, 0x6a, 0x3f, 0x10, 0x3e, 0x89, 0x8d, + 0x6e, 0x41, 0xfe, 0x3b, 0x01, 0xad, 0xb6, 0x6c, 0x4a, 0xcd, 0x42, 0xaa, 0x57, 0xc8, 0x7f, 0xed, + 0xed, 0x86, 0xa0, 0x1f, 0x1f, 0x2e, 0x15, 0xeb, 0x81, 0xab, 0x5e, 0xb1, 0x1e, 0x60, 0xfd, 0xd8, + 0x00, 0x88, 0x7b, 0x7b, 0x7d, 0xf1, 0x67, 0x9c, 0xea, 0xe2, 0x2f, 0xf3, 0x42, 0x17, 0xda, 0x4b, + 0x90, 0x23, 0x61, 0x18, 0x84, 0xb2, 0xfa, 0x2c, 0x70, 0x5f, 0xd9, 0xe0, 0x04, 0x1c, 0xd1, 0xad, + 0x3f, 0x4c, 0xc0, 0x64, 0x83, 0x38, 0x21, 0x61, 0xe7, 0x52, 0x0e, 0x7d, 0x0e, 0x0a, 0x5e, 0xbb, + 0xdd, 0x65, 0xf6, 0x4e, 0x8b, 0x08, 0xd7, 0xcf, 0x47, 0x6e, 0x50, 0x53, 0x44, 0x1c, 0xf3, 0x51, + 0x08, 0x13, 0x62, 0x72, 0xd1, 0xb9, 0x7c, 0x63, 0xc4, 0x8d, 0x8f, 0xac, 0x2d, 0xaf, 0xdb, 0xcc, + 0xde, 0xf0, 0x59, 0xd8, 0xd3, 0xf9, 0x7e, 0x82, 0x93, 0x7e, 0xf2, 0x97, 0xa5, 0x5c, 0xa5, 0xc7, + 0x08, 0xc5, 0x02, 0x0b, 0x7d, 0x68, 0x00, 0x50, 0x16, 0x7a, 0x7e, 0x93, 0x73, 0x65, 0x6d, 0xbc, + 0x35, 0x1e, 0x74, 0x43, 0xeb, 0x8b, 0x26, 0xa0, 0x97, 0x28, 0x66, 0xe0, 0x04, 0x28, 0x2a, 0xcb, + 0xb2, 0x2a, 0x9b, 0x8a, 0xbb, 0xaa, 0xac, 0x82, 0x48, 0x6b, 0x5c, 0x50, 0x2d, 0x7c, 0x05, 0x0a, + 0x5a, 0x39, 0x7a, 0x09, 0xb2, 0xfb, 0xa4, 0x17, 0x45, 0x40, 0xcc, 0x1f, 0xd1, 0xcb, 0x90, 0x3b, + 0xb0, 0x5b, 0xdd, 0xe8, 0x0a, 
0x6c, 0x1a, 0x47, 0x2f, 0x37, 0x33, 0x37, 0x8c, 0x85, 0xdb, 0x30, + 0xdb, 0x37, 0xb7, 0xe7, 0x0d, 0x2f, 0x24, 0x86, 0x5b, 0x9f, 0x18, 0x20, 0x27, 0x73, 0x2e, 0x25, + 0xc1, 0x4e, 0xba, 0x24, 0xb8, 0x3d, 0xd6, 0x26, 0x0d, 0xa9, 0x0a, 0xfe, 0x98, 0x81, 0x29, 0x99, + 0xef, 0xce, 0xe5, 0xbc, 0xb8, 0xa9, 0xf6, 0xa1, 0x32, 0xb2, 0x89, 0xc2, 0x82, 0xa1, 0x2d, 0x44, + 0xab, 0xaf, 0x85, 0x58, 0x1f, 0x13, 0xe7, 0xd9, 0x6d, 0xc4, 0x91, 0x01, 0x45, 0x29, 0x79, 0x2e, + 0x7e, 0xe3, 0xa4, 0xfd, 0xe6, 0xce, 0x78, 0xc6, 0x0e, 0x71, 0x9c, 0xdf, 0xc6, 0x46, 0x9e, 0xf2, + 0xff, 0xa0, 0xd1, 0x83, 0xbe, 0x4a, 0x28, 0xd9, 0xa1, 0x09, 0x45, 0xd6, 0x62, 0xe2, 0x9f, 0xd5, + 0x5c, 0xfa, 0xcf, 0xd8, 0x07, 0x92, 0x8e, 0xb5, 0x84, 0xf5, 0xa8, 0xa8, 0xe7, 0x2e, 0xca, 0xe1, + 0xa6, 0xba, 0xde, 0x36, 0xc6, 0x6a, 0xd8, 0x13, 0xcb, 0x31, 0xe4, 0x5f, 0xee, 0xef, 0x41, 0x9e, + 0x92, 0x16, 0x71, 0x58, 0x10, 0xca, 0xcd, 0xa9, 0x8f, 0xef, 0xf1, 0xe5, 0x86, 0x54, 0x19, 0x05, + 0x5f, 0x6d, 0xb8, 0x22, 0x63, 0x8d, 0x89, 0x56, 0xa0, 0xe0, 0xb4, 0xba, 0x94, 0x91, 0xb0, 0x56, + 0x97, 0xd1, 0x57, 0xdf, 0x2c, 0x54, 0x15, 0x03, 0xc7, 0x32, 0xa8, 0x0c, 0xa0, 0x5f, 0xa8, 0x89, + 0xc4, 0xed, 0x50, 0x49, 0x94, 0x7d, 0x9a, 0x8a, 0x13, 0x12, 0x68, 0x45, 0x46, 0xf6, 0xe8, 0xcf, + 0xc4, 0xff, 0xeb, 0x8b, 0xec, 0x6a, 0xd1, 0x13, 0xbd, 0xf2, 0x35, 0x28, 0x92, 0xf7, 0x19, 0x09, + 0x7d, 0xbb, 0xc5, 0x11, 0x72, 0x02, 0x61, 0x96, 0x97, 0xc4, 0x1b, 0x31, 0x19, 0x27, 0x65, 0xd0, + 0x36, 0xcc, 0x52, 0x42, 0xa9, 0x17, 0xf8, 0x6b, 0xbb, 0xbb, 0xbc, 0xab, 0xe8, 0x89, 0x6a, 0xad, + 0x50, 0xf9, 0xac, 0x84, 0x9b, 0x6d, 0xa4, 0xd9, 0xc7, 0x82, 0x14, 0xd5, 0xef, 0x92, 0x84, 0xfb, + 0x55, 0xa0, 0x3b, 0x50, 0x6a, 0xa5, 0xfe, 0x00, 0x91, 0x5d, 0x81, 0xee, 0x67, 0xd2, 0x7f, 0x8f, + 0xe0, 0x3e, 0x69, 0xf4, 0x0d, 0x30, 0x93, 0x94, 0x46, 0xd0, 0x0d, 0x1d, 0x82, 0x6d, 0xbf, 0x49, + 0xa2, 0x8f, 0x19, 0x0a, 0x95, 0xcb, 0x47, 0x87, 0x4b, 0xe6, 0xe6, 0x10, 0x19, 0x3c, 0x74, 0x34, + 0xa2, 0x30, 0xaf, 0xcc, 0xdf, 0x0e, 0xed, 0xdd, 0x5d, 0xcf, 0xa9, 
0x07, 0x2d, 0xcf, 0xe9, 0x89, + 0x1e, 0xa2, 0x50, 0xb9, 0x2d, 0x27, 0x38, 0xbf, 0x31, 0x48, 0xe8, 0xf8, 0x70, 0xe9, 0xb2, 0xb4, + 0x7d, 0x20, 0x1f, 0x0f, 0xd6, 0x8d, 0xb6, 0xe0, 0xe2, 0x1e, 0xb1, 0x5b, 0x6c, 0xaf, 0xba, 0x47, + 0x9c, 0x7d, 0x75, 0x86, 0xcc, 0x69, 0x71, 0xb6, 0xd4, 0xbe, 0x5e, 0xbc, 0x7f, 0x52, 0x04, 0x0f, + 0x1a, 0x87, 0x7e, 0x61, 0xc0, 0x7c, 0xdf, 0x8a, 0x47, 0x1f, 0xbd, 0x98, 0xa5, 0xb1, 0xbe, 0x2d, + 0x68, 0x0c, 0xd2, 0x59, 0xb9, 0xc4, 0x97, 0x63, 0x20, 0x0b, 0x0f, 0x9e, 0x05, 0xba, 0x09, 0xe0, + 0x75, 0xee, 0xd9, 0x6d, 0xaf, 0xe5, 0x11, 0x6a, 0x5e, 0x14, 0xfb, 0xb5, 0xc0, 0xfd, 0xbc, 0x56, + 0x57, 0x54, 0x1e, 0x9b, 0xe4, 0x5b, 0x0f, 0x27, 0xa4, 0xd1, 0x26, 0x94, 0xe4, 0x5b, 0x4f, 0x6e, + 0xcc, 0x9c, 0xd8, 0x98, 0x4f, 0x89, 0x2e, 0xb8, 0x9e, 0xe4, 0x1c, 0x9f, 0xa0, 0xe0, 0xbe, 0xb1, + 0xa8, 0x0a, 0x73, 0x49, 0x4f, 0x88, 0x2a, 0xf2, 0x79, 0xa1, 0x70, 0x9e, 0x57, 0xf3, 0x9b, 0xfd, + 0x4c, 0x7c, 0x52, 0x1e, 0x05, 0x30, 0xef, 0xf9, 0x83, 0x5c, 0xe6, 0x15, 0xa1, 0xe8, 0x35, 0xbe, + 0x3e, 0x35, 0xff, 0xd9, 0xee, 0x32, 0x90, 0x8f, 0x07, 0xeb, 0x5d, 0xb8, 0x05, 0x33, 0xa9, 0x28, + 0xf4, 0x42, 0x65, 0xd6, 0xa3, 0x0c, 0x1f, 0x9d, 0xc8, 0xac, 0xe8, 0x07, 0x06, 0x4c, 0x27, 0xad, + 0x92, 0x69, 0xb3, 0x76, 0x06, 0x7f, 0xfb, 0xc9, 0xdc, 0xad, 0xbf, 0x0a, 0x4a, 0xf2, 0x70, 0x0a, + 0x14, 0x75, 0x07, 0x34, 0xcf, 0x6b, 0xa3, 0x66, 0xee, 0x53, 0xb7, 0xce, 0xd6, 0x47, 0x06, 0x0c, + 0x76, 0x5e, 0x14, 0x40, 0xde, 0x91, 0x9f, 0x8c, 0xc9, 0x15, 0x19, 0xf9, 0x1b, 0x94, 0xd4, 0x97, + 0x67, 0xd1, 0x85, 0xbf, 0xa2, 0x61, 0x0d, 0x62, 0xfd, 0xdd, 0x80, 0x9c, 0xb8, 0x69, 0x47, 0xaf, + 0x26, 0xf6, 0xb3, 0x52, 0x94, 0x16, 0x64, 0xdf, 0x22, 0xbd, 0x68, 0x73, 0xaf, 0xa4, 0x36, 0x37, + 0xce, 0x7e, 0xef, 0x70, 0xa2, 0xdc, 0x6b, 0xb4, 0x0a, 0x93, 0x64, 0x77, 0x97, 0x38, 0x4c, 0xa6, + 0x9e, 0x57, 0x55, 0xfd, 0xb4, 0x21, 0xa8, 0x3c, 0x41, 0x08, 0xb0, 0xe8, 0x15, 0x4b, 0x61, 0xde, + 0x97, 0x33, 0xaf, 0x4d, 0xd6, 0x5c, 0x97, 0xb8, 0x67, 0x72, 0x1d, 0x29, 0x1a, 0xb2, 0x6d, 0xa5, + 0x12, 
0xc7, 0xda, 0x79, 0x23, 0x7b, 0x89, 0x27, 0x27, 0x77, 0x33, 0x70, 0xec, 0x56, 0x54, 0xb0, + 0x62, 0xb2, 0x4b, 0x42, 0xe2, 0x3b, 0x04, 0x5d, 0x85, 0xbc, 0xdd, 0xf1, 0xde, 0x08, 0x83, 0xae, + 0xba, 0x40, 0x14, 0xeb, 0xb6, 0x56, 0xaf, 0x09, 0x1a, 0xd6, 0x5c, 0x5e, 0xb0, 0xec, 0x7b, 0xbe, + 0x2b, 0x57, 0x43, 0x17, 0x2c, 0x6f, 0x79, 0xbe, 0x8b, 0x05, 0x47, 0x97, 0x4b, 0xd9, 0x61, 0xe5, + 0x92, 0x75, 0x07, 0x8a, 0x89, 0x2f, 0xd6, 0x78, 0xea, 0x6e, 0xf3, 0x87, 0xba, 0xcd, 0xf6, 0xfa, + 0x53, 0xf7, 0x96, 0x62, 0xe0, 0x58, 0xa6, 0xf2, 0xad, 0xc7, 0x4f, 0x17, 0x2f, 0x7c, 0xfc, 0x74, + 0xf1, 0xc2, 0x93, 0xa7, 0x8b, 0x17, 0x3e, 0x38, 0x5a, 0x34, 0x1e, 0x1f, 0x2d, 0x1a, 0x1f, 0x1f, + 0x2d, 0x1a, 0x4f, 0x8e, 0x16, 0x8d, 0x4f, 0x8e, 0x16, 0x8d, 0x9f, 0xff, 0x75, 0xf1, 0xc2, 0x37, + 0x57, 0x47, 0xfa, 0xc6, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x64, 0x82, 0x76, 0x1b, + 0x2a, 0x00, 0x00, } func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { @@ -1832,6 +1834,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -3485,6 +3494,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -4211,6 +4224,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ 
-6081,6 +6095,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto index 719483287e..eb3131a2e3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto @@ -231,6 +231,15 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. 
+ // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -418,12 +427,9 @@ message PodCondition { optional string message = 6; } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod optional string ip = 1; } @@ -517,11 +523,13 @@ message PodStatus { // +patchStrategy=merge repeated PodCondition conditions = 2; - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional optional string hostIP = 5; - // IP address allocated to the pod. Routable at least within the cluster. + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. // +optional optional string podIP = 6; @@ -792,10 +800,9 @@ message ServiceSpec { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. 
+ // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional optional string loadBalancerIP = 8; diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go index a4e6d69182..2245d3869b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go @@ -193,6 +193,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -200,6 +203,22 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + + // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler + // skips scheduling the pod because one or more scheduling gates are still present. + PodReasonSchedulingGated = "SchedulingGated" + + // PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens + // during scheduling, for example due to nodeAffinity parsing errors. 
+ PodReasonSchedulerError = "SchedulerError" + + // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // is initiated by kubelet + PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -309,12 +328,9 @@ type PodSpec struct { HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` } @@ -348,10 +364,13 @@ type PodStatus struct { // +patchMergeKey=type // +patchStrategy=merge Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. 
// +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` @@ -560,6 +579,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -577,6 +605,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -712,10 +742,9 @@ type ServiceSpec struct { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. 
+ // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` @@ -1048,7 +1077,6 @@ const ( NodeTerminated NodePhase = "Terminated" ) -// +enum type NodeConditionType string // These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here. @@ -1384,3 +1412,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go index 62e4787571..182db51603 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go @@ -285,6 +285,11 @@ func (in *EndpointsList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go index ce6e352f0e..aed723500b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go @@ -340,6 +340,14 @@ func (in *LoadBalancerIngress) DeepEqual(other *LoadBalancerIngress) bool { if in.Hostname != other.Hostname { return false } + if (in.IPMode == nil) != (other.IPMode == nil) { + return false + } else if in.IPMode != nil { + if *in.IPMode != *other.IPMode { + return false + } + } + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { in, other := &in.Ports, &other.Ports if other == nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go index 13358faed8..ade6fc4349 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go @@ -267,8 +267,8 @@ func (r *Requirement) Operator() selection.Operator { } // Values returns requirement values -func (r *Requirement) Values() sets.String { - ret := sets.String{} +func (r *Requirement) Values() sets.Set[string] { + ret := sets.New[string]() for i := range r.strValues { ret.Insert(r.strValues[i]) } @@ -651,7 +651,7 @@ func (p *Parser) parse() (internalSelector, error) { case IdentifierToken, DoesNotExistToken: r, err := p.parseRequirement() if err != nil { - return 
nil, fmt.Errorf("unable to parse requirement: %v", err) + return nil, fmt.Errorf("unable to parse requirement: %w", err) } requirements = append(requirements, *r) t, l := p.consume(Values) @@ -686,7 +686,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) { if err != nil { return nil, err } - var values sets.String + var values sets.Set[string] switch operator { case selection.In, selection.NotIn: values, err = p.parseValues() @@ -696,7 +696,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) { if err != nil { return nil, err } - return NewRequirement(key, operator, values.List()) + return NewRequirement(key, operator, sets.List(values)) } @@ -752,7 +752,7 @@ func (p *Parser) parseOperator() (op selection.Operator, err error) { } // parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (sets.String, error) { +func (p *Parser) parseValues() (sets.Set[string], error) { tok, lit := p.consume(Values) if tok != OpenParToken { return nil, fmt.Errorf("found '%s' expected: '('", lit) @@ -770,7 +770,7 @@ func (p *Parser) parseValues() (sets.String, error) { return s, nil case ClosedParToken: // handles "()" p.consume(Values) - return sets.NewString(""), nil + return sets.New[string](""), nil default: return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) } @@ -778,8 +778,8 @@ func (p *Parser) parseValues() (sets.String, error) { // parseIdentifiersList parses a (possibly empty) list of // of comma separated (possibly empty) identifiers -func (p *Parser) parseIdentifiersList() (sets.String, error) { - s := sets.NewString() +func (p *Parser) parseIdentifiersList() (sets.Set[string], error) { + s := sets.New[string]() for { tok, lit := p.consume(Values) switch tok { @@ -814,8 +814,8 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) { } // parseExactValue parses the only value for exact match style -func (p *Parser) parseExactValue() (sets.String, error) { - s := sets.NewString() 
+func (p *Parser) parseExactValue() (sets.Set[string], error) { + s := sets.New[string]() tok, _ := p.lookahead(Values) if tok == EndOfStringToken || tok == CommaToken { s.Insert("") @@ -908,7 +908,7 @@ func SelectorFromSet(ls Set) Selector { // nil and empty Sets are considered equivalent to Everything(). // The Set is validated client-side, which allows to catch errors early. func ValidatedSelectorFromSet(ls Set) (Selector, error) { - if ls == nil || len(ls) == 0 { + if len(ls) == 0 { return internalSelector{}, nil } requirements := make([]Requirement, 0, len(ls)) @@ -930,7 +930,7 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector // instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { - if ls == nil || len(ls) == 0 { + if len(ls) == 0 { return internalSelector{} } requirements := make([]Requirement, 0, len(ls)) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go index c6777d920c..18629faf29 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go @@ -358,82 +358,83 @@ func init() { } var fileDescriptor_e0f89ca41f751b36 = []byte{ - // 1190 bytes of a gzipped FileDescriptorProto + // 1205 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0x76, 0x13, 0xc7, 0x7e, 0x4e, 0x9b, 0x74, 0xbe, 0xed, 0x97, 0x25, 0x12, 0xb6, 0x6b, + 0x17, 0xf7, 0x66, 0x13, 0xc7, 0x7e, 0x4e, 0x9b, 0x74, 0xbe, 0xed, 0x97, 0x25, 0x12, 0xb6, 0x6b, 0x24, 0x94, 0x4a, 0xb0, 0xa6, 0x39, 0x54, 0x69, 0x85, 0x10, 0xdd, 0xf4, 0x87, 0x42, 0x9b, 0xb4, - 0x9a, 0xb4, 0x3d, 0x00, 0x07, 0x26, 0xeb, 0xa9, 0x33, 0x78, 0x77, 0xd6, 0xec, 
0x8c, 0x03, 0xbe, - 0x15, 0x89, 0x43, 0x01, 0x21, 0x55, 0x1c, 0x10, 0xc7, 0x56, 0xe2, 0x3f, 0xe1, 0xd2, 0x63, 0x8f, - 0x3d, 0x20, 0x8b, 0x1a, 0xfe, 0x08, 0xd4, 0x0b, 0x68, 0x66, 0x67, 0xbd, 0xeb, 0x1f, 0x55, 0x55, - 0x93, 0x93, 0x77, 0xdf, 0x7b, 0xf3, 0xf9, 0x7c, 0x66, 0xf7, 0xbd, 0xcf, 0xac, 0xe1, 0x6a, 0x9b, - 0xc9, 0x83, 0xde, 0xbe, 0xeb, 0x47, 0x61, 0xd3, 0x67, 0x01, 0xeb, 0x8d, 0x7e, 0xba, 0x9d, 0x76, - 0xb3, 0xb3, 0x29, 0x9a, 0x22, 0x60, 0xa1, 0xbe, 0x20, 0x5d, 0x26, 0x9a, 0x21, 0x95, 0xa4, 0x79, - 0x78, 0xae, 0xd9, 0xa6, 0x9c, 0xc6, 0x44, 0xd2, 0x96, 0xdb, 0x8d, 0x23, 0x19, 0xa1, 0xf3, 0x19, - 0x8e, 0x9b, 0x00, 0xa4, 0x3f, 0xdd, 0x4e, 0xdb, 0xed, 0x6c, 0x0a, 0x57, 0xe1, 0xe8, 0x0b, 0x85, - 0xe3, 0x2a, 0x1c, 0xf7, 0xf0, 0xdc, 0xda, 0x7b, 0x39, 0xfe, 0x76, 0xd4, 0x8e, 0x9a, 0x1a, 0x6e, - 0xbf, 0x77, 0x4f, 0xdf, 0xe9, 0x1b, 0x7d, 0x95, 0xd0, 0xac, 0x29, 0x21, 0x2e, 0x8b, 0x94, 0x96, - 0x90, 0xf8, 0x07, 0x8c, 0xd3, 0xb8, 0xaf, 0x95, 0xc6, 0x3d, 0x2e, 0x59, 0x48, 0x27, 0x75, 0xad, - 0x9d, 0x7f, 0xd5, 0x02, 0xe1, 0x1f, 0xd0, 0x90, 0x4c, 0xae, 0x6b, 0xfc, 0x64, 0x43, 0x79, 0x2b, - 0xe2, 0x2d, 0x26, 0x59, 0xc4, 0x51, 0x1d, 0x16, 0x64, 0xbf, 0x4b, 0x1d, 0xab, 0x6e, 0xad, 0x97, - 0xbd, 0xe5, 0x27, 0x83, 0x5a, 0x61, 0x38, 0xa8, 0x2d, 0xdc, 0xee, 0x77, 0x29, 0xd6, 0x19, 0x74, - 0x01, 0x8a, 0x42, 0x12, 0xd9, 0x13, 0xce, 0x31, 0x5d, 0x73, 0xc6, 0xd4, 0x14, 0xf7, 0x74, 0xf4, - 0xc5, 0xa0, 0xb6, 0x32, 0x82, 0x4b, 0x42, 0xd8, 0x2c, 0x40, 0x1f, 0x03, 0x8a, 0xf6, 0x05, 0x8d, - 0x0f, 0x69, 0xeb, 0x5a, 0xa2, 0x82, 0x45, 0xdc, 0xb1, 0xeb, 0xd6, 0xba, 0xed, 0xad, 0x19, 0x18, - 0x74, 0x73, 0xaa, 0x02, 0xcf, 0x58, 0x85, 0x1e, 0x58, 0x80, 0x02, 0x22, 0xe4, 0xed, 0x98, 0x70, - 0xa1, 0xc9, 0x6e, 0xb3, 0x90, 0x3a, 0x0b, 0x75, 0x6b, 0xbd, 0xb2, 0xf1, 0x81, 0x3b, 0xdf, 0x4b, - 0x72, 0x15, 0x46, 0x26, 0xe5, 0xc6, 0x14, 0x3e, 0x9e, 0xc1, 0x89, 0xde, 0x81, 0x62, 0x4c, 0x89, - 0x88, 0xb8, 0xb3, 0xa8, 0x9f, 0xc8, 0x89, 0xf4, 0x89, 0x60, 0x1d, 0xc5, 0x26, 0x8b, 0xce, 0xc2, - 0x52, 0x48, 0x85, 
0x20, 0x6d, 0xea, 0x14, 0x75, 0xe1, 0x8a, 0x29, 0x5c, 0xda, 0x49, 0xc2, 0x38, - 0xcd, 0x37, 0xfe, 0x3e, 0x06, 0xc7, 0x6f, 0x90, 0x7d, 0x1a, 0xec, 0xd1, 0x80, 0xfa, 0x32, 0x8a, - 0xd1, 0x8f, 0x16, 0x54, 0x42, 0x22, 0xfd, 0x03, 0x1d, 0x16, 0x8e, 0x55, 0xb7, 0xd7, 0x2b, 0x1b, - 0x77, 0xe7, 0xdd, 0xe8, 0x18, 0xb8, 0xbb, 0x93, 0x01, 0x5f, 0xe1, 0x32, 0xee, 0x7b, 0xff, 0x33, - 0xca, 0x2a, 0xb9, 0x0c, 0xce, 0xf3, 0xa3, 0x9f, 0x2d, 0x58, 0xd5, 0xf7, 0x57, 0xbe, 0xee, 0xc6, - 0x54, 0x08, 0x16, 0x71, 0xd5, 0x11, 0x4a, 0xd4, 0xad, 0x23, 0x11, 0x85, 0xe9, 0x97, 0x3d, 0x16, - 0xd3, 0x90, 0x72, 0xe9, 0x39, 0x46, 0xce, 0xea, 0xce, 0x04, 0x23, 0x9e, 0xd2, 0xb0, 0xf6, 0x21, - 0xac, 0x4e, 0x6e, 0x07, 0xad, 0x82, 0xdd, 0xa1, 0xfd, 0xa4, 0xa9, 0xb1, 0xba, 0x44, 0xa7, 0x60, - 0xf1, 0x90, 0x04, 0x3d, 0x9a, 0x34, 0x31, 0x4e, 0x6e, 0x2e, 0x1e, 0xdb, 0xb4, 0x1a, 0xbf, 0x5a, - 0xe0, 0xbc, 0x4c, 0x08, 0x7a, 0x2b, 0x07, 0xe4, 0x55, 0x8c, 0x2a, 0xfb, 0x3a, 0xed, 0x27, 0xa8, - 0x57, 0xa0, 0x14, 0x75, 0x55, 0x87, 0x46, 0xb1, 0x99, 0x8e, 0xb3, 0xa6, 0xa6, 0x74, 0xd3, 0xc4, - 0x5f, 0x0c, 0x6a, 0xa7, 0xc7, 0xe0, 0xd3, 0x04, 0x1e, 0x2d, 0x45, 0x0d, 0x28, 0x6a, 0x3d, 0xc2, - 0xb1, 0xeb, 0xf6, 0x7a, 0xd9, 0x03, 0xd5, 0x4c, 0x77, 0x75, 0x04, 0x9b, 0x4c, 0xe3, 0x37, 0x0b, - 0x4a, 0x37, 0x98, 0x90, 0x3b, 0x54, 0x12, 0x74, 0x09, 0x56, 0x62, 0x2a, 0xa2, 0x5e, 0xec, 0xd3, - 0xbb, 0x34, 0x56, 0xcf, 0xc1, 0xd0, 0xbf, 0x61, 0xe8, 0x57, 0xf0, 0x78, 0x1a, 0x4f, 0xd6, 0xa3, - 0x77, 0xa1, 0xe4, 0x47, 0x5c, 0x32, 0xde, 0xa3, 0x7a, 0x22, 0xcb, 0xde, 0x6a, 0x2a, 0x7d, 0xcb, - 0xc4, 0xf1, 0xa8, 0x02, 0x5d, 0x05, 0x14, 0xd3, 0x90, 0x30, 0xce, 0x78, 0x7b, 0x5b, 0xd2, 0x70, - 0x2b, 0xea, 0x71, 0xa9, 0x87, 0xcf, 0xf6, 0xfe, 0xaf, 0x46, 0x07, 0x4f, 0x65, 0xf1, 0x8c, 0x15, - 0x8d, 0xbf, 0x96, 0x00, 0x6e, 0xee, 0x7f, 0x41, 0xfd, 0x64, 0x1f, 0x75, 0x58, 0xe0, 0x24, 0x9c, - 0x72, 0x9f, 0x5d, 0x12, 0x52, 0xac, 0x33, 0x68, 0x13, 0x96, 0x53, 0x03, 0x53, 0x51, 0xb3, 0xcd, - 0x53, 0xa6, 0x72, 0xf9, 0x5a, 0x2e, 0x87, 0xc7, 0x2a, 
0x51, 0x13, 0xca, 0x0a, 0x41, 0x74, 0x89, - 0x9f, 0xee, 0xf0, 0xa4, 0x59, 0x56, 0xde, 0x4d, 0x13, 0x38, 0xab, 0x41, 0x1e, 0xd8, 0x3d, 0xd6, - 0x32, 0x33, 0xfd, 0x7e, 0xfa, 0xae, 0xef, 0x6c, 0x5f, 0x7e, 0x31, 0xa8, 0x9d, 0x79, 0x99, 0xd9, - 0x2a, 0x8b, 0x14, 0xee, 0x9d, 0xed, 0xcb, 0x58, 0x2d, 0x9e, 0xf5, 0x62, 0x8a, 0xaf, 0xf9, 0x62, - 0x36, 0x00, 0xda, 0x99, 0x59, 0x2e, 0xe9, 0x47, 0x8c, 0xcc, 0x6a, 0xc8, 0x99, 0x64, 0xae, 0x0a, - 0x7d, 0x63, 0xc1, 0xc9, 0x16, 0x0d, 0x68, 0x6a, 0x51, 0x42, 0x92, 0xb0, 0xeb, 0x94, 0x8f, 0xc0, - 0x1b, 0x4f, 0x0f, 0x07, 0xb5, 0x93, 0x97, 0x27, 0xa1, 0xf1, 0x34, 0x1b, 0x3a, 0x84, 0x62, 0x90, - 0x58, 0x55, 0x45, 0xbb, 0xc2, 0xee, 0xbc, 0xbc, 0x59, 0x7f, 0xb8, 0x79, 0x8b, 0x1a, 0xb9, 0xac, - 0x71, 0x27, 0xc3, 0x86, 0xbe, 0xb7, 0xa0, 0x42, 0x38, 0x8f, 0xa4, 0x7e, 0x14, 0xc2, 0x59, 0xd6, - 0xec, 0x7b, 0x47, 0xc0, 0x7e, 0x29, 0x43, 0x9d, 0x70, 0xc9, 0x5c, 0x06, 0xe7, 0xc9, 0xd1, 0x77, - 0x16, 0xac, 0x44, 0x5f, 0x71, 0x1a, 0x63, 0x7a, 0x8f, 0xc6, 0x94, 0xfb, 0x54, 0x38, 0xc7, 0xb5, - 0xa0, 0xab, 0x73, 0x0b, 0x1a, 0x83, 0xcb, 0x1a, 0x69, 0x3c, 0x2e, 0xf0, 0x24, 0xef, 0xda, 0x05, - 0xa8, 0xcc, 0xe9, 0x89, 0xca, 0x53, 0x27, 0x37, 0xff, 0x5a, 0x9e, 0xfa, 0xad, 0x05, 0x27, 0xc6, - 0xf5, 0xa9, 0x51, 0xef, 0x30, 0xde, 0x9a, 0x1c, 0xf5, 0xeb, 0x8c, 0xb7, 0xb0, 0xce, 0x8c, 0xcc, - 0xc0, 0x7e, 0xa9, 0x19, 0xb8, 0x00, 0xca, 0x91, 0xe2, 0x28, 0x08, 0x68, 0xac, 0x07, 0xab, 0xe4, - 0x9d, 0x50, 0x63, 0xb1, 0x35, 0x8a, 0xe2, 0x5c, 0x45, 0xe3, 0x07, 0x0b, 0x4e, 0xdf, 0x22, 0xb1, - 0x64, 0x24, 0xc8, 0x5e, 0x6b, 0x8b, 0x48, 0x82, 0x62, 0x28, 0x85, 0xe6, 0x5a, 0x2b, 0xaa, 0x6c, - 0x78, 0xff, 0xbd, 0x61, 0xb2, 0x31, 0xcd, 0x62, 0x78, 0xc4, 0xd3, 0xf8, 0xc7, 0x82, 0x37, 0x67, - 0xaa, 0x51, 0xb6, 0x8e, 0xf8, 0x94, 0xa2, 0x8f, 0xe6, 0x3e, 0x56, 0xcd, 0x31, 0x91, 0x39, 0x7a, - 0x1a, 0xc9, 0xd4, 0xa0, 0x18, 0x16, 0x99, 0xa4, 0x61, 0x7a, 0x86, 0xef, 0xcc, 0x4b, 0x36, 0x73, - 0x47, 0xde, 0x71, 0xc3, 0xbc, 0xa8, 0xac, 0x5f, 0xe0, 0x84, 0xaa, 0x11, 0xc0, 0x82, 0xfe, 
0x80, - 0x3a, 0x0b, 0x4b, 0x82, 0xfa, 0x11, 0x6f, 0x09, 0xbd, 0x55, 0x3b, 0xfb, 0x30, 0xda, 0x4b, 0xc2, - 0x38, 0xcd, 0xa3, 0xb7, 0x61, 0x91, 0x13, 0x1e, 0x25, 0x1f, 0x9f, 0x8b, 0x19, 0xee, 0xae, 0x0a, - 0xe2, 0x24, 0x77, 0xf1, 0xd4, 0x2f, 0x8f, 0x6a, 0x85, 0x07, 0x8f, 0x6b, 0x85, 0x87, 0x8f, 0x6b, - 0x85, 0x47, 0x8f, 0x6b, 0x85, 0xfb, 0xbf, 0xd7, 0x0b, 0x8d, 0x4f, 0xa1, 0x9c, 0xb9, 0xd3, 0x11, - 0x53, 0x36, 0x3e, 0x87, 0x92, 0xfa, 0x46, 0x4e, 0x4f, 0xb1, 0x57, 0xb4, 0xf6, 0x06, 0x00, 0xe9, - 0xb2, 0xf1, 0xa3, 0x7a, 0xd4, 0x2c, 0x97, 0x6e, 0x6d, 0xa7, 0x87, 0x41, 0xae, 0xca, 0xfb, 0xec, - 0xc9, 0xf3, 0x6a, 0xe1, 0xe9, 0xf3, 0x6a, 0xe1, 0xd9, 0xf3, 0x6a, 0xe1, 0xfe, 0xb0, 0x6a, 0x3d, - 0x19, 0x56, 0xad, 0xa7, 0xc3, 0xaa, 0xf5, 0x6c, 0x58, 0xb5, 0xfe, 0x18, 0x56, 0xad, 0x87, 0x7f, - 0x56, 0x0b, 0x9f, 0x9c, 0x9f, 0xef, 0x5f, 0xce, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0xad, - 0x00, 0xb3, 0x1e, 0x0d, 0x00, 0x00, + 0x9a, 0xb4, 0x3d, 0x00, 0x07, 0x26, 0xeb, 0xa9, 0x33, 0x64, 0x77, 0xd6, 0xec, 0x8c, 0x03, 0xbe, + 0x95, 0x5b, 0x01, 0x21, 0x55, 0x1c, 0x10, 0xc7, 0x56, 0xe2, 0x3f, 0xe1, 0xd2, 0x63, 0x8f, 0x3d, + 0x20, 0x8b, 0x1a, 0xfe, 0x08, 0x54, 0x21, 0x81, 0x66, 0x76, 0xd6, 0xbb, 0xfe, 0x51, 0x95, 0xba, + 0x39, 0x79, 0xf7, 0xbd, 0x37, 0x9f, 0xcf, 0x67, 0x76, 0xdf, 0xfb, 0xcc, 0x1a, 0xae, 0xb4, 0x99, + 0xdc, 0xef, 0xee, 0xb9, 0x7e, 0x14, 0x36, 0x7d, 0x16, 0xb0, 0xee, 0xf0, 0xa7, 0x73, 0xd0, 0x6e, + 0x1e, 0x6c, 0x88, 0xa6, 0x08, 0x58, 0xa8, 0x2f, 0x48, 0x87, 0x89, 0x66, 0x48, 0x25, 0x69, 0x1e, + 0x9e, 0x6d, 0xb6, 0x29, 0xa7, 0x31, 0x91, 0xb4, 0xe5, 0x76, 0xe2, 0x48, 0x46, 0xe8, 0x5c, 0x86, + 0xe3, 0x26, 0x00, 0xe9, 0x4f, 0xe7, 0xa0, 0xed, 0x1e, 0x6c, 0x08, 0x57, 0xe1, 0xe8, 0x0b, 0x85, + 0xe3, 0x2a, 0x1c, 0xf7, 0xf0, 0xec, 0xea, 0x7b, 0x39, 0xfe, 0x76, 0xd4, 0x8e, 0x9a, 0x1a, 0x6e, + 0xaf, 0x7b, 0x57, 0xdf, 0xe9, 0x1b, 0x7d, 0x95, 0xd0, 0xac, 0x2a, 0x21, 0x2e, 0x8b, 0x94, 0x96, + 0x90, 0xf8, 0xfb, 0x8c, 0xd3, 0xb8, 0xa7, 0x95, 0xc6, 0x5d, 0x2e, 0x59, 0x48, 0xc7, 0x75, 
0xad, + 0x9e, 0x7b, 0xd9, 0x02, 0xe1, 0xef, 0xd3, 0x90, 0x8c, 0xaf, 0x6b, 0xfc, 0x68, 0x43, 0x79, 0x33, + 0xe2, 0x2d, 0x26, 0x59, 0xc4, 0x51, 0x1d, 0xe6, 0x65, 0xaf, 0x43, 0x1d, 0xab, 0x6e, 0xad, 0x95, + 0xbd, 0xa5, 0xc7, 0xfd, 0x5a, 0x61, 0xd0, 0xaf, 0xcd, 0xdf, 0xea, 0x75, 0x28, 0xd6, 0x19, 0x74, + 0x1e, 0x8a, 0x42, 0x12, 0xd9, 0x15, 0xce, 0x9c, 0xae, 0x39, 0x6d, 0x6a, 0x8a, 0xbb, 0x3a, 0xfa, + 0xbc, 0x5f, 0x5b, 0x1e, 0xc2, 0x25, 0x21, 0x6c, 0x16, 0xa0, 0x8f, 0x01, 0x45, 0x7b, 0x82, 0xc6, + 0x87, 0xb4, 0x75, 0x35, 0x51, 0xc1, 0x22, 0xee, 0xd8, 0x75, 0x6b, 0xcd, 0xf6, 0x56, 0x0d, 0x0c, + 0xba, 0x31, 0x51, 0x81, 0xa7, 0xac, 0x42, 0xf7, 0x2d, 0x40, 0x01, 0x11, 0xf2, 0x56, 0x4c, 0xb8, + 0xd0, 0x64, 0xb7, 0x58, 0x48, 0x9d, 0xf9, 0xba, 0xb5, 0x56, 0x59, 0xff, 0xc0, 0x9d, 0xed, 0x25, + 0xb9, 0x0a, 0x23, 0x93, 0x72, 0x7d, 0x02, 0x1f, 0x4f, 0xe1, 0x44, 0xef, 0x40, 0x31, 0xa6, 0x44, + 0x44, 0xdc, 0x59, 0xd0, 0x4f, 0xe4, 0x78, 0xfa, 0x44, 0xb0, 0x8e, 0x62, 0x93, 0x45, 0x67, 0x60, + 0x31, 0xa4, 0x42, 0x90, 0x36, 0x75, 0x8a, 0xba, 0x70, 0xd9, 0x14, 0x2e, 0x6e, 0x27, 0x61, 0x9c, + 0xe6, 0x1b, 0x7f, 0xcd, 0xc1, 0xb1, 0xeb, 0x64, 0x8f, 0x06, 0xbb, 0x34, 0xa0, 0xbe, 0x8c, 0x62, + 0xf4, 0x83, 0x05, 0x95, 0x90, 0x48, 0x7f, 0x5f, 0x87, 0x85, 0x63, 0xd5, 0xed, 0xb5, 0xca, 0xfa, + 0x9d, 0x59, 0x37, 0x3a, 0x02, 0xee, 0x6e, 0x67, 0xc0, 0x97, 0xb9, 0x8c, 0x7b, 0xde, 0xff, 0x8c, + 0xb2, 0x4a, 0x2e, 0x83, 0xf3, 0xfc, 0xe8, 0x27, 0x0b, 0x56, 0xf4, 0xfd, 0xe5, 0xaf, 0x3b, 0x31, + 0x15, 0x82, 0x45, 0x5c, 0x75, 0x84, 0x12, 0x75, 0xf3, 0x48, 0x44, 0x61, 0xfa, 0x65, 0x97, 0xc5, + 0x34, 0xa4, 0x5c, 0x7a, 0x8e, 0x91, 0xb3, 0xb2, 0x3d, 0xc6, 0x88, 0x27, 0x34, 0xac, 0x7e, 0x08, + 0x2b, 0xe3, 0xdb, 0x41, 0x2b, 0x60, 0x1f, 0xd0, 0x5e, 0xd2, 0xd4, 0x58, 0x5d, 0xa2, 0x93, 0xb0, + 0x70, 0x48, 0x82, 0x2e, 0x4d, 0x9a, 0x18, 0x27, 0x37, 0x17, 0xe6, 0x36, 0xac, 0xc6, 0x2f, 0x16, + 0x38, 0x2f, 0x12, 0x82, 0xde, 0xca, 0x01, 0x79, 0x15, 0xa3, 0xca, 0xbe, 0x46, 0x7b, 0x09, 0xea, + 0x65, 0x28, 0x45, 0x1d, 0xd5, 
0xa1, 0x51, 0x6c, 0xa6, 0xe3, 0x8c, 0xa9, 0x29, 0xdd, 0x30, 0xf1, + 0xe7, 0xfd, 0xda, 0xa9, 0x11, 0xf8, 0x34, 0x81, 0x87, 0x4b, 0x51, 0x03, 0x8a, 0x5a, 0x8f, 0x70, + 0xec, 0xba, 0xbd, 0x56, 0xf6, 0x40, 0x35, 0xd3, 0x1d, 0x1d, 0xc1, 0x26, 0xd3, 0xf8, 0xd5, 0x82, + 0xd2, 0x75, 0x26, 0xe4, 0x36, 0x95, 0x04, 0x5d, 0x84, 0xe5, 0x98, 0x8a, 0xa8, 0x1b, 0xfb, 0xf4, + 0x0e, 0x8d, 0xd5, 0x73, 0x30, 0xf4, 0x6f, 0x18, 0xfa, 0x65, 0x3c, 0x9a, 0xc6, 0xe3, 0xf5, 0xe8, + 0x5d, 0x28, 0xf9, 0x11, 0x97, 0x8c, 0x77, 0xa9, 0x9e, 0xc8, 0xb2, 0xb7, 0x92, 0x4a, 0xdf, 0x34, + 0x71, 0x3c, 0xac, 0x40, 0x57, 0x00, 0xc5, 0x34, 0x24, 0x8c, 0x33, 0xde, 0xde, 0x92, 0x34, 0xdc, + 0x8c, 0xba, 0x5c, 0xea, 0xe1, 0xb3, 0xbd, 0xff, 0xab, 0xd1, 0xc1, 0x13, 0x59, 0x3c, 0x65, 0x45, + 0xe3, 0xcf, 0x45, 0x80, 0x1b, 0x7b, 0x5f, 0x50, 0x3f, 0xd9, 0x47, 0x1d, 0xe6, 0x39, 0x09, 0x27, + 0xdc, 0x67, 0x87, 0x84, 0x14, 0xeb, 0x0c, 0xda, 0x80, 0xa5, 0xd4, 0xc0, 0x54, 0xd4, 0x6c, 0xf3, + 0xa4, 0xa9, 0x5c, 0xba, 0x9a, 0xcb, 0xe1, 0x91, 0x4a, 0xd4, 0x84, 0xb2, 0x42, 0x10, 0x1d, 0xe2, + 0xa7, 0x3b, 0x3c, 0x61, 0x96, 0x95, 0x77, 0xd2, 0x04, 0xce, 0x6a, 0x90, 0x07, 0x76, 0x97, 0xb5, + 0xcc, 0x4c, 0xbf, 0x9f, 0xbe, 0xeb, 0xdb, 0x5b, 0x97, 0x9e, 0xf7, 0x6b, 0xa7, 0x5f, 0x64, 0xb6, + 0xca, 0x22, 0x85, 0x7b, 0x7b, 0xeb, 0x12, 0x56, 0x8b, 0xa7, 0xbd, 0x98, 0xe2, 0x2b, 0xbe, 0x98, + 0x75, 0x80, 0x76, 0x66, 0x96, 0x8b, 0xfa, 0x11, 0x23, 0xb3, 0x1a, 0x72, 0x26, 0x99, 0xab, 0x42, + 0xdf, 0x58, 0x70, 0xa2, 0x45, 0x03, 0x9a, 0x5a, 0x94, 0x90, 0x24, 0xec, 0x38, 0xe5, 0x23, 0xf0, + 0xc6, 0x53, 0x83, 0x7e, 0xed, 0xc4, 0xa5, 0x71, 0x68, 0x3c, 0xc9, 0x86, 0x0e, 0xa1, 0x18, 0x24, + 0x56, 0x55, 0xd1, 0xae, 0xb0, 0x33, 0x2b, 0x6f, 0xd6, 0x1f, 0x6e, 0xde, 0xa2, 0x86, 0x2e, 0x6b, + 0xdc, 0xc9, 0xb0, 0xa1, 0xef, 0x2c, 0xa8, 0x10, 0xce, 0x23, 0xa9, 0x1f, 0x85, 0x70, 0x96, 0x34, + 0xfb, 0xee, 0x11, 0xb0, 0x5f, 0xcc, 0x50, 0xc7, 0x5c, 0x32, 0x97, 0xc1, 0x79, 0x72, 0xf4, 0xad, + 0x05, 0xcb, 0xd1, 0x57, 0x9c, 0xc6, 0x98, 0xde, 0xa5, 0x31, 0xe5, 
0x3e, 0x15, 0xce, 0x31, 0x2d, + 0xe8, 0xca, 0xcc, 0x82, 0x46, 0xe0, 0xb2, 0x46, 0x1a, 0x8d, 0x0b, 0x3c, 0xce, 0xbb, 0x7a, 0x1e, + 0x2a, 0x33, 0x7a, 0xa2, 0xf2, 0xd4, 0xf1, 0xcd, 0xbf, 0x92, 0xa7, 0xfe, 0x6d, 0xc1, 0xf1, 0x51, + 0x7d, 0xaa, 0xad, 0x49, 0x87, 0xa5, 0x43, 0x91, 0x0c, 0xd9, 0xb0, 0xad, 0x2f, 0xde, 0xdc, 0x4a, + 0xe7, 0x21, 0x57, 0xa5, 0xec, 0xe1, 0x80, 0xf1, 0xd6, 0xb8, 0x3d, 0x5c, 0x63, 0xbc, 0x85, 0x75, + 0x66, 0x68, 0x20, 0xf6, 0x0b, 0x0d, 0xc4, 0x4c, 0xf5, 0xfc, 0xeb, 0x4c, 0xb5, 0x0b, 0xa0, 0x9c, + 0x30, 0x8e, 0x82, 0x80, 0xc6, 0x7a, 0xa0, 0x4b, 0xde, 0x71, 0xa5, 0x7b, 0x73, 0x18, 0xc5, 0xb9, + 0x8a, 0xc6, 0xf7, 0x16, 0x9c, 0xba, 0x49, 0x62, 0xc9, 0x48, 0x90, 0xb5, 0x53, 0x8b, 0x48, 0x82, + 0x62, 0x28, 0x85, 0xe6, 0x5a, 0xef, 0xaa, 0xb2, 0xee, 0xbd, 0x7e, 0xa3, 0x66, 0xcf, 0x31, 0x8b, + 0xe1, 0x21, 0x4f, 0xe3, 0x1f, 0x0b, 0xde, 0x9c, 0xaa, 0x46, 0x1d, 0x27, 0x88, 0x4f, 0x28, 0xfa, + 0x68, 0xe6, 0xe3, 0xdc, 0x1c, 0x4f, 0xd9, 0x49, 0x92, 0x46, 0x32, 0x35, 0x28, 0x86, 0x05, 0x26, + 0x69, 0x98, 0x7e, 0x3b, 0x6c, 0xcf, 0x4a, 0x36, 0x75, 0x47, 0xde, 0x31, 0xc3, 0xbc, 0xa0, 0x8e, + 0x1c, 0x81, 0x13, 0xaa, 0x46, 0x00, 0xf3, 0xfa, 0xc3, 0xed, 0x0c, 0x2c, 0x0a, 0xea, 0x47, 0xbc, + 0x25, 0xf4, 0x56, 0xed, 0xec, 0x83, 0x6c, 0x37, 0x09, 0xe3, 0x34, 0x8f, 0xde, 0x86, 0x05, 0x4e, + 0x78, 0x94, 0x7c, 0xf4, 0x2e, 0x64, 0xb8, 0x3b, 0x2a, 0x88, 0x93, 0xdc, 0x85, 0x93, 0x3f, 0x3f, + 0xac, 0x15, 0xee, 0x3f, 0xaa, 0x15, 0x1e, 0x3c, 0xaa, 0x15, 0x1e, 0x3e, 0xaa, 0x15, 0xee, 0xfd, + 0x56, 0x2f, 0x34, 0x3e, 0x85, 0x72, 0xe6, 0x8a, 0x47, 0x4c, 0xd9, 0xf8, 0x1c, 0x4a, 0xea, 0xdb, + 0x3c, 0x3d, 0x3d, 0x5f, 0x32, 0x1e, 0xa3, 0x43, 0x37, 0xf7, 0x5f, 0x86, 0xce, 0xfb, 0xec, 0xf1, + 0xb3, 0x6a, 0xe1, 0xc9, 0xb3, 0x6a, 0xe1, 0xe9, 0xb3, 0x6a, 0xe1, 0xde, 0xa0, 0x6a, 0x3d, 0x1e, + 0x54, 0xad, 0x27, 0x83, 0xaa, 0xf5, 0x74, 0x50, 0xb5, 0x7e, 0x1f, 0x54, 0xad, 0x07, 0x7f, 0x54, + 0x0b, 0x9f, 0x9c, 0x9b, 0xed, 0xdf, 0xd5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x8c, 0x32, + 0xb1, 
0x96, 0x0d, 0x00, 0x00, } func (m *Condition) Marshal() (dAtA []byte, err error) { @@ -788,6 +789,16 @@ func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1088,6 +1099,10 @@ func (m *OwnerReference) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) if m.Controller != nil { n += 2 } @@ -1268,6 +1283,8 @@ func (this *OwnerReference) String() string { s := strings.Join([]string{`&OwnerReference{`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Controller:` + valueToStringGenerated(this.Controller) + `,`, `}`, }, "") @@ -2696,6 +2713,70 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto index c63d3425f2..49ca0e991f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto @@ -103,8 +103,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. @@ -273,6 +271,9 @@ message ObjectMeta { // be cluster-scoped, so there is no namespace field. // +structType=atomic message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + // Kind of the referent. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; @@ -281,6 +282,10 @@ message OwnerReference { // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + optional string uid = 4; + // If true, this reference points to the managing controller. // +optional optional bool controller = 6; diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go index a3f6761275..0b9804f6e1 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go @@ -73,10 +73,11 @@ func FormatLabelSelector(labelSelector *LabelSelector) string { // FullOwnerReferences converts slim OwnerReferences to original OwnerReferences func FullOwnerReferences(references []OwnerReference) []metav1.OwnerReference { - var fullRefs []metav1.OwnerReference for _, ref := range references { full := metav1.OwnerReference{ + APIVersion: ref.APIVersion, + UID: ref.UID, Name: ref.Name, Kind: ref.Kind, Controller: ref.Controller, @@ -88,11 +89,12 @@ func FullOwnerReferences(references []OwnerReference) []metav1.OwnerReference { // SlimOwnerReferences converts original OwnerReferences to slim OwnerReferences func SlimOwnerReferences(references []metav1.OwnerReference) []OwnerReference { - var slimRefs []OwnerReference for _, ref := range references { slim := OwnerReference{ + APIVersion: ref.APIVersion, Name: ref.Name, + UID: ref.UID, Kind: ref.Kind, Controller: ref.Controller, } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go index 
27d9338234..728b073128 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go @@ -205,12 +205,17 @@ const ( // be cluster-scoped, so there is no namespace field. // +structType=atomic type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` @@ -248,9 +253,7 @@ type MatchLabelsValue = string // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. 
// diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go index 0e31e67b82..07857fefaf 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go @@ -12,21 +12,29 @@ import ( slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" ) -func ValidateLabelSelector(ps *slim_metav1.LabelSelector, fldPath *field.Path) field.ErrorList { +// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options +type LabelSelectorValidationOptions struct { + // Allow invalid label value in selector + AllowInvalidLabelValueInSelector bool +} + +// ValidateLabelSelector validate the LabelSelector according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. +func ValidateLabelSelector(ps *slim_metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ps == nil { return allErrs } allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) for i, expr := range ps.MatchExpressions { - allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, opts, fldPath.Child("matchExpressions").Index(i))...) } return allErrs } // ValidateLabelSelectorRequirement validate the requirement according to the opts and returns any validation errors. // opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. 
-func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { +func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch sr.Operator { case slim_metav1.LabelSelectorOpIn, slim_metav1.LabelSelectorOpNotIn: @@ -41,6 +49,13 @@ func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, f allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + if !opts.AllowInvalidLabelValueInSelector { + for valueIndex, value := range sr.Values { + for _, msg := range validation.IsValidLabelValue(value) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(valueIndex), value, msg)) + } + } + } return allErrs } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go index 140545c0e6..21c8be665c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go @@ -242,12 +242,18 @@ func (in *OwnerReference) DeepEqual(other *OwnerReference) bool { return false } + if in.APIVersion != other.APIVersion { + return false + } if in.Kind != other.Kind { return false } if in.Name != other.Name { return false } + if in.UID != other.UID { + return false + } if (in.Controller == nil) != (other.Controller == nil) { return false } else if in.Controller != nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go index 930c32d0dd..e47652d11f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go 
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go @@ -6,14 +6,17 @@ package utils import ( "net" "sort" + "strings" v1 "k8s.io/api/core/v1" v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cilium/cilium/pkg/ip" + k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection" + labelsPkg "github.com/cilium/cilium/pkg/labels" ) const ( @@ -59,11 +62,10 @@ func GetObjNamespaceName(obj NamespaceNameGetter) string { return ns + "/" + obj.GetName() } -// ServiceConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier -type ServiceConfiguration interface { - // K8sServiceProxyNameValue must return the value of the proxy name - // annotation. If set, only services with this label will be handled. - K8sServiceProxyNameValue() string +// EnvoyConfigConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier +type EnvoyConfigConfiguration interface { + // K8sEnvoyConfigEnabled returns true if CiliumEnvoyConfig feature is enabled in Cilium + K8sEnvoyConfigEnabled() bool } // IngressConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier @@ -114,7 +116,7 @@ func GetEndpointSliceListOptionsModifier() (func(options *v1meta.ListOptions), e // handle services that match our service proxy name. If the service proxy name for Cilium // is an empty string, we assume that Cilium is the default service handler in which case // we select all services that don't have the above mentioned label. 
-func GetServiceAndEndpointListOptionsModifier(cfg ServiceConfiguration) (func(options *v1meta.ListOptions), error) { +func GetServiceAndEndpointListOptionsModifier(k8sServiceProxy string) (func(options *v1meta.ListOptions), error) { var ( serviceNameSelector, nonHeadlessServiceSelector *labels.Requirement err error @@ -125,12 +127,12 @@ func GetServiceAndEndpointListOptionsModifier(cfg ServiceConfiguration) (func(op return nil, err } - if cfg.K8sServiceProxyNameValue() == "" { + if k8sServiceProxy == "" { serviceNameSelector, err = labels.NewRequirement( serviceProxyNameLabel, selection.DoesNotExist, nil) } else { serviceNameSelector, err = labels.NewRequirement( - serviceProxyNameLabel, selection.DoubleEquals, []string{cfg.K8sServiceProxyNameValue()}) + serviceProxyNameLabel, selection.DoubleEquals, []string{k8sServiceProxy}) } if err != nil { @@ -219,3 +221,61 @@ func GetClusterIPByFamily(ipFamily slim_corev1.IPFamily, service *slim_corev1.Se return "" } + +// filterPodLabels returns a copy of the given labels map, without the labels owned by Cilium. +func filterPodLabels(labels map[string]string) map[string]string { + res := map[string]string{} + for k, v := range labels { + if strings.HasPrefix(k, k8sconst.LabelPrefix) { + continue + } + res[k] = v + } + return res +} + +// SanitizePodLabels makes sure that no important pod labels were overridden manually on k8s pod +// object creation. 
+func SanitizePodLabels(podLabels map[string]string, namespace *slim_corev1.Namespace, serviceAccount, clusterName string) map[string]string { + sanitizedLabels := filterPodLabels(podLabels) + + // Sanitize namespace labels + for k, v := range namespace.GetLabels() { + sanitizedLabels[joinPath(k8sconst.PodNamespaceMetaLabels, k)] = v + } + // Sanitize namespace name label + sanitizedLabels[k8sconst.PodNamespaceLabel] = namespace.ObjectMeta.Name + // Sanitize service account name + if serviceAccount != "" { + sanitizedLabels[k8sconst.PolicyLabelServiceAccount] = serviceAccount + } else { + delete(sanitizedLabels, k8sconst.PolicyLabelServiceAccount) + } + // Sanitize cluster name + sanitizedLabels[k8sconst.PolicyLabelCluster] = clusterName + + return sanitizedLabels +} + +// StripPodSpecialLabels strips labels that are not supposed to be coming from a k8s pod object update. +func StripPodSpecialLabels(labels map[string]string) map[string]string { + sanitizedLabels := make(map[string]string) + for k, v := range filterPodLabels(labels) { + // If the key contains the prefix for namespace labels then we will + // ignore it. + if strings.HasPrefix(k, k8sconst.PodNamespaceMetaLabels) { + continue + } + // Also ignore it if the key is a kubernetes namespace label. 
+ if k == k8sconst.PodNamespaceLabel { + continue + } + sanitizedLabels[k] = v + } + return sanitizedLabels +} + +// joinPath mimics JoinPath from pkg/policy/utils, which could not be imported here due to circular dependency +func joinPath(a, b string) string { + return a + labelsPkg.PathDelimiter + b +} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go new file mode 100644 index 0000000000..611ff5512d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package labels + +import ( + "fmt" + "net/netip" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" + + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/option" +) + +// maskedIPToLabelString is the base method for serializing an IP + prefix into +// a string that can be used for creating Labels and EndpointSelector objects. +// +// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't +// support colons inside the name section of a label. +func maskedIPToLabel(ipStr string, prefix int) Label { + var str strings.Builder + str.Grow( + 1 /* preZero */ + + len(ipStr) + + 1 /* postZero */ + + 2 /*len of prefix*/ + + 1, /* '/' */ + ) + + for i := 0; i < len(ipStr); i++ { + if ipStr[i] == ':' { + // EndpointSelector keys can't start or end with a "-", so insert a + // zero at the start or end if it would otherwise have a "-" at that + // position. + if i == 0 { + str.WriteByte('0') + str.WriteByte('-') + continue + } + if i == len(ipStr)-1 { + str.WriteByte('-') + str.WriteByte('0') + continue + } + str.WriteByte('-') + } else { + str.WriteByte(ipStr[i]) + } + } + str.WriteRune('/') + str.WriteString(strconv.Itoa(prefix)) + return Label{Key: str.String(), Source: LabelSourceCIDR} +} + +// IPStringToLabel parses a string and returns it as a CIDR label. 
+// +// If ip is not a valid IP address or CIDR Prefix, returns an error. +func IPStringToLabel(ip string) (Label, error) { + // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's + // an IP and not a CIDR. + i := strings.LastIndexByte(ip, '/') + if i < 0 { + parsedIP, err := netip.ParseAddr(ip) + if err != nil { + return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) + } + return maskedIPToLabel(ip, parsedIP.BitLen()), nil + } else { + parsedPrefix, err := netip.ParsePrefix(ip) + if err != nil { + return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) + } + return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil + } +} + +// GetCIDRLabels turns a CIDR into a set of labels representing the cidr itself +// and all broader CIDRS which include the specified CIDR in them. For example: +// CIDR: 10.0.0.0/8 => +// +// "cidr:10.0.0.0/8", "cidr:10.0.0.0/7", "cidr:8.0.0.0/6", +// "cidr:8.0.0.0/5", "cidr:0.0.0.0/4, "cidr:0.0.0.0/3", +// "cidr:0.0.0.0/2", "cidr:0.0.0.0/1", "cidr:0.0.0.0/0" +// +// The identity reserved:world is always added as it includes any CIDR. +func GetCIDRLabels(prefix netip.Prefix) Labels { + once.Do(func() { + // simplelru.NewLRU fails only when given a negative size, so we can skip the error check + cidrLabelsCache, _ = simplelru.NewLRU[netip.Prefix, []Label](cidrLabelsCacheMaxSize, nil) + }) + + addr := prefix.Addr() + ones := prefix.Bits() + lbls := make(Labels, 1 /* this CIDR */ +ones /* the prefixes */ +1 /*world label*/) + + // If ones is zero, then it's the default CIDR prefix /0 which should + // just be regarded as reserved:world. In all other cases, we need + // to generate the set of prefixes starting from the /0 up to the + // specified prefix length. 
+ if ones == 0 { + addWorldLabel(addr, lbls) + return lbls + } + + computeCIDRLabels( + cidrLabelsCache, + lbls, + nil, // avoid allocating space for the intermediate results until we need it + addr, + ones, + ) + addWorldLabel(addr, lbls) + + return lbls +} + +var ( + // cidrLabelsCache stores the partial computations for CIDR labels. + // This both avoids repeatedly computing the prefixes and makes sure the + // CIDR strings are reused to reduce memory usage. + // Stored in a lru map to limit memory usage. + // + // Stores e.g. for prefix "10.0.0.0/8" the labels ["10.0.0.0/8", ..., "0.0.0.0/0"]. + cidrLabelsCache *simplelru.LRU[netip.Prefix, []Label] + + // mutex to serialize concurrent accesses to the cidrLabelsCache. + mu lock.Mutex +) + +const cidrLabelsCacheMaxSize = 8192 + +func addWorldLabel(addr netip.Addr, lbls Labels) { + switch { + case !option.Config.IsDualStack(): + lbls[worldLabelNonDualStack.Key] = worldLabelNonDualStack + case addr.Is4(): + lbls[worldLabelV4.Key] = worldLabelV4 + default: + lbls[worldLabelV6.Key] = worldLabelV6 + } +} + +var ( + once sync.Once + + worldLabelNonDualStack = Label{Key: IDNameWorld, Source: LabelSourceReserved} + worldLabelV4 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv4} + worldLabelV6 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv6} +) + +func computeCIDRLabels(cache *simplelru.LRU[netip.Prefix, []Label], lbls Labels, results []Label, addr netip.Addr, ones int) []Label { + if ones < 0 { + return results + } + + prefix, _ := addr.Prefix(ones) + + mu.Lock() + cachedLbls, ok := cache.Get(prefix) + mu.Unlock() + if ok { + for _, lbl := range cachedLbls { + lbls[lbl.Key] = lbl + } + if results == nil { + return cachedLbls + } else { + return append(results, cachedLbls...) + } + } + + // Compute the label for this prefix (e.g. "cidr:10.0.0.0/8") + prefixLabel := maskedIPToLabel(prefix.Addr().String(), ones) + lbls[prefixLabel.Key] = prefixLabel + + // Keep computing the rest (e.g. 
"cidr:10.0.0.0/7", ...). + results = computeCIDRLabels( + cache, + lbls, + append(results, prefixLabel), + prefix.Addr(), ones-1, + ) + + // Cache the resulting labels derived from this prefix, e.g. /8, /7, ... + mu.Lock() + cache.Add(prefix, results[len(results)-ones-1:]) + mu.Unlock() + + return results +} + +// leafCIDRList is a map of CIDR to data, where only leaf CIDRs are present +// in the map. +type leafCIDRList[T any] map[netip.Prefix]T + +// insert conditionally adds a prefix to the leaf cidr list, +// adding it only if the prefix is a leaf. Additionally, it removes +// any now non-leaf cidr. +func (ll leafCIDRList[T]) insert(newPrefix netip.Prefix, v T) { + // Check every existing leaf CIDR. Three possible cases: + // - an existing prefix contains this one: delete existing, add new + // - this new prefix contains an existing one: drop new prefix + // - no matches: add new + for existingPrefix := range ll { + // Is this a subset of an existing prefix? That means we've found a now non-leaf + // prefix -- swap it + if existingPrefix.Contains(newPrefix.Addr()) && existingPrefix.Bits() < newPrefix.Bits() { + delete(ll, existingPrefix) + // it is safe to stop here, since at most one prefix in the list could + // have contained this prefix. + break + } + + // Is this a superset of an existing prefix? 
Then we're not a leaf; skip it + if newPrefix.Contains(existingPrefix.Addr()) && newPrefix.Bits() <= existingPrefix.Bits() { + return + } + } + ll[newPrefix] = v +} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go deleted file mode 100644 index c05cf985da..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cidr - -import ( - "fmt" - "net/netip" - "strconv" - "strings" - - "github.com/cilium/cilium/pkg/labels" -) - -// maskedIPToLabelString is the base method for serializing an IP + prefix into -// a string that can be used for creating Labels and EndpointSelector objects. -// -// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't -// support colons inside the name section of a label. -func maskedIPToLabelString(ip netip.Addr, prefix int) string { - ipStr := ip.String() - ipNoColons := strings.Replace(ipStr, ":", "-", -1) - - // EndpointSelector keys can't start or end with a "-", so insert a - // zero at the start or end if it would otherwise have a "-" at that - // position. - preZero := "" - postZero := "" - if ipNoColons[0] == '-' { - preZero = "0" - } - if ipNoColons[len(ipNoColons)-1] == '-' { - postZero = "0" - } - var str strings.Builder - str.Grow( - len(labels.LabelSourceCIDR) + - len(preZero) + - len(ipNoColons) + - len(postZero) + - 2 /*len of prefix*/ + - 2, /* ':' '/' */ - ) - str.WriteString(labels.LabelSourceCIDR) - str.WriteRune(':') - str.WriteString(preZero) - str.WriteString(ipNoColons) - str.WriteString(postZero) - str.WriteRune('/') - str.WriteString(strconv.Itoa(prefix)) - return str.String() -} - -// IPStringToLabel parses a string and returns it as a CIDR label. -// -// If ip is not a valid IP address or CIDR Prefix, returns an error. 
-func IPStringToLabel(ip string) (labels.Label, error) { - var lblString string - // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's - // an IP and not a CIDR. - i := strings.LastIndexByte(ip, '/') - if i < 0 { - parsedIP, err := netip.ParseAddr(ip) - if err != nil { - return labels.Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) - } - lblString = maskedIPToLabelString(parsedIP, parsedIP.BitLen()) - } else { - parsedPrefix, err := netip.ParsePrefix(ip) - if err != nil { - return labels.Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) - } - lblString = maskedIPToLabelString(parsedPrefix.Masked().Addr(), parsedPrefix.Bits()) - } - return labels.ParseLabel(lblString), nil -} - -// GetCIDRLabels turns a CIDR into a set of labels representing the cidr itself -// and all broader CIDRS which include the specified CIDR in them. For example: -// CIDR: 10.0.0.0/8 => -// -// "cidr:10.0.0.0/8", "cidr:10.0.0.0/7", "cidr:8.0.0.0/6", -// "cidr:8.0.0.0/5", "cidr:0.0.0.0/4, "cidr:0.0.0.0/3", -// "cidr:0.0.0.0/2", "cidr:0.0.0.0/1", "cidr:0.0.0.0/0" -// -// The identity reserved:world is always added as it includes any CIDR. -func GetCIDRLabels(prefix netip.Prefix) labels.Labels { - ones := prefix.Bits() - result := make([]string, 0, ones+1) - - // If ones is zero, then it's the default CIDR prefix /0 which should - // just be regarded as reserved:world. In all other cases, we need - // to generate the set of prefixes starting from the /0 up to the - // specified prefix length. 
- if ones > 0 { - ip := prefix.Addr() - for i := 0; i <= ones; i++ { - p := netip.PrefixFrom(ip, i) - label := maskedIPToLabelString(p.Masked().Addr(), i) - result = append(result, label) - } - } - - result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorld) - - return labels.NewLabelsFromModel(result) -} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go deleted file mode 100644 index f97bd9a51f..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package cidr provides helper methods for generating labels for CIDRs which -// are partially derived from node state. -package cidr diff --git a/vendor/github.com/cilium/cilium/pkg/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/labels/labels.go index 1eaf46e47e..74a7afd32c 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/labels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/labels.go @@ -7,7 +7,8 @@ import ( "bytes" "encoding/json" "fmt" - "net" + "net/netip" + "slices" "sort" "strings" ) @@ -26,6 +27,14 @@ const ( // IDNameWorld is the label used for the world ID. IDNameWorld = "world" + // IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish + // it from world-ipv6 in dual-stack mode. + IDNameWorldIPv4 = "world-ipv4" + + // IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish + // it from world-ipv4 in dual-stack mode. + IDNameWorldIPv6 = "world-ipv6" + // IDNameCluster is the label used to identify an unspecified endpoint // inside the cluster IDNameCluster = "cluster" @@ -69,6 +78,12 @@ var ( // LabelWorld is the label used for world. LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)} + // LabelWorldIPv4 is the label used for world-ipv4. 
+ LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)} + + // LabelWorldIPv6 is the label used for world-ipv6. + LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)} + // LabelRemoteNode is the label used for remote nodes. LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)} @@ -100,6 +115,9 @@ const ( // LabelSourceContainer is a label imported from the container runtime LabelSourceContainer = "container" + // LabelSourceCNI is a label imported from the CNI plugin + LabelSourceCNI = "cni" + // LabelSourceReserved is the label source for reserved types. LabelSourceReserved = "reserved" @@ -129,26 +147,24 @@ type Labels map[string]Label // GetPrintableModel turns the Labels into a sorted list of strings // representing the labels, with CIDRs deduplicated (ie, only provide the most -// specific CIDR). +// specific CIDRs). func (l Labels) GetPrintableModel() (res []string) { - cidr := "" - prefixLength := 0 + // Aggregate list of "leaf" CIDRs + leafCIDRs := leafCIDRList[*Label]{} for _, v := range l { + // If this is a CIDR label, filter out non-leaf CIDRs for human consumption if v.Source == LabelSourceCIDR { - vStr := strings.Replace(v.String(), "-", ":", -1) - prefix := strings.Replace(v.Key, "-", ":", -1) - _, ipnet, _ := net.ParseCIDR(prefix) - ones, _ := ipnet.Mask.Size() - if ones > prefixLength { - cidr = vStr - prefixLength = ones - } - continue + v := v + prefixStr := strings.Replace(v.Key, "-", ":", -1) + prefix, _ := netip.ParsePrefix(prefixStr) + leafCIDRs.insert(prefix, &v) + } else { + // not a CIDR label, no magic needed + res = append(res, v.String()) } - res = append(res, v.String()) } - if cidr != "" { - res = append(res, cidr) + for _, val := range leafCIDRs { + res = append(res, strings.Replace(val.String(), "-", ":", -1)) } sort.Strings(res) @@ -160,20 +176,6 @@ func (l Labels) String() string { return 
strings.Join(l.GetPrintableModel(), ",") } -// AppendPrefixInKey appends the given prefix to all the Key's of the map and the -// respective Labels' Key. -func (l Labels) AppendPrefixInKey(prefix string) Labels { - newLabels := Labels{} - for k, v := range l { - newLabels[prefix+k] = Label{ - Key: prefix + v.Key, - Value: v.Value, - Source: v.Source, - } - } - return newLabels -} - // Equals returns true if the two Labels contain the same set of labels. func (l Labels) Equals(other Labels) bool { if len(l) != len(other) { @@ -291,7 +293,7 @@ func (l *Label) UnmarshalJSON(data []byte) error { var aux string if err := json.Unmarshal(data, &aux); err != nil { - return fmt.Errorf("decode of Label as string failed: %+v", err) + return fmt.Errorf("decode of Label as string failed: %w", err) } if aux == "" { @@ -395,6 +397,15 @@ func NewLabelsFromModel(base []string) Labels { return lbls } +// FromSlice creates labels from a slice of labels. +func FromSlice(labels []Label) Labels { + lbls := make(Labels, len(labels)) + for _, lbl := range labels { + lbls[lbl.Key] = lbl + } + return lbls +} + // NewLabelsFromSortedList returns labels based on the output of SortedList() func NewLabelsFromSortedList(list string) Labels { return NewLabelsFromModel(strings.Split(list, ";")) @@ -413,7 +424,7 @@ func NewSelectLabelArrayFromModel(base []string) LabelArray { // NewFrom creates a new Labels from the given labels by creating a copy. func NewFrom(l Labels) Labels { - nl := NewLabelsFromModel(nil) + nl := make(Labels, len(l)) nl.MergeLabels(l) return nl } @@ -469,13 +480,24 @@ func (l Label) FormatForKVStore() []byte { // kvstore.prefixMatchesKey()) b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3) buf := bytes.NewBuffer(b) + l.formatForKVStoreInto(buf) + return buf.Bytes() +} + +// formatForKVStoreInto writes the label as a formatted string, ending in +// a semicolon into buf. +// +// DO NOT BREAK THE FORMAT OF THIS. 
THE RETURNED STRING IS USED AS +// PART OF THE KEY IN THE KEY-VALUE STORE. +// +// Non-pointer receiver allows this to be called on a value in a map. +func (l Label) formatForKVStoreInto(buf *bytes.Buffer) { buf.WriteString(l.Source) buf.WriteRune(':') buf.WriteString(l.Key) buf.WriteRune('=') buf.WriteString(l.Value) buf.WriteRune(';') - return buf.Bytes() } // SortedList returns the labels as a sorted list, separated by semicolon @@ -487,12 +509,23 @@ func (l Labels) SortedList() []byte { for k := range l { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) - b := make([]byte, 0, len(keys)*2) + // Labels can have arbitrary size. However, when many CIDR identities are in + // the system, for example due to a FQDN policy matching S3, CIDR labels + // dominate in number. IPv4 CIDR labels in serialized form are max 25 bytes + // long. Allocate slightly more to avoid having a realloc if there's some + // other labels which may longer, since the cost of allocating a few bytes + // more is dominated by a second allocation, especially since these + // allocations are short-lived. 
+ // + // cidr:123.123.123.123/32=; + // 0 1 2 + // 1234567890123456789012345 + b := make([]byte, 0, len(keys)*30) buf := bytes.NewBuffer(b) for _, k := range keys { - buf.Write(l[k].FormatForKVStore()) + l[k].formatForKVStoreInto(buf) } return buf.Bytes() diff --git a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go index f05f9f0455..96f394f462 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go @@ -116,22 +116,22 @@ func (o *OpLabels) AllLabels() Labels { return all } -func (o *OpLabels) ReplaceInformationLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) for _, v := range l { keepers.set(v.Key) - if o.OrchestrationInfo.upsertLabel(v) { + if o.OrchestrationInfo.upsertLabel(sourceFilter, v) { changed = true logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning information label") } } - o.OrchestrationInfo.deleteUnMarked(keepers) + o.OrchestrationInfo.deleteUnMarked(sourceFilter, keepers) return changed } -func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) @@ -141,13 +141,13 @@ func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { // A disabled identity label stays disabled without value updates if _, found := o.Disabled[k]; found { disabledKeepers.set(k) - } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(v) { + } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) { logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning security relevant label") changed = true } } - if 
o.OrchestrationIdentity.deleteUnMarked(keepers) || o.Disabled.deleteUnMarked(disabledKeepers) { + if o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) { changed = true } @@ -201,25 +201,40 @@ func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bo // upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label // was not already in 'l'. Returns 'true' if a label was added, or an old label was // updated, 'false' otherwise. -func (l Labels) upsertLabel(label Label) bool { +// The label is only updated if its source matches the provided 'sourceFilter' +// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must +// also match the old label 'source' in order for it to be replaced. +func (l Labels) upsertLabel(sourceFilter string, label Label) bool { oldLabel, found := l[label.Key] if found { + if sourceFilter != LabelSourceAny && sourceFilter != oldLabel.Source { + return false + } + // Key is the same, check if Value and Source are also the same if label.Value == oldLabel.Value && label.Source == oldLabel.Source { return false // No change } + + // If the label is not from the same source, then don't replace it. + if oldLabel.Source != label.Source { + return false + } } + // Insert or replace old label l[label.Key] = label return true } // deleteUnMarked deletes the labels which have not been marked for keeping. +// The labels are only deleted if their source matches the provided sourceFilter +// or in case the provided sourceFilter is 'LabelSourceAny'. // Returns true if any of them were deleted. 
-func (l Labels) deleteUnMarked(marks keepMarks) bool { +func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool { deleted := false - for k := range l { - if _, keep := marks[k]; !keep { + for k, v := range l { + if _, keep := marks[k]; !keep && (sourceFilter == LabelSourceAny || sourceFilter == v.Source) { delete(l, k) deleted = true } diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go index ac172cdea4..07073c5d9f 100644 --- a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go +++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go @@ -7,6 +7,7 @@ import ( "fmt" "net" "sort" + "strconv" "strings" "github.com/cilium/cilium/api/v1/models" @@ -150,6 +151,10 @@ func (s ServiceFlags) SVCType() SVCType { } } +func (s ServiceFlags) IsL7LB() bool { + return s&serviceFlagL7LoadBalancer != 0 +} + // SVCExtTrafficPolicy returns a service traffic policy from the flags func (s ServiceFlags) SVCExtTrafficPolicy() SVCTrafficPolicy { switch { @@ -739,9 +744,9 @@ func (a *L3n4Addr) String() string { scope = "/i" } if a.IsIPv6() { - return fmt.Sprintf("[%s]:%d%s", a.AddrCluster.String(), a.Port, scope) + return "[" + a.AddrCluster.String() + "]:" + strconv.FormatUint(uint64(a.Port), 10) + scope } - return fmt.Sprintf("%s:%d%s", a.AddrCluster.String(), a.Port, scope) + return a.AddrCluster.String() + ":" + strconv.FormatUint(uint64(a.Port), 10) + scope } // StringWithProtocol returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]" @@ -752,9 +757,9 @@ func (a *L3n4Addr) StringWithProtocol() string { scope = "/i" } if a.IsIPv6() { - return fmt.Sprintf("[%s]:%d/%s%s", a.AddrCluster.String(), a.Port, a.Protocol, scope) + return "[" + a.AddrCluster.String() + "]:" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope } - return fmt.Sprintf("%s:%d/%s%s", a.AddrCluster.String(), a.Port, a.Protocol, scope) + return a.AddrCluster.String() 
+ ":" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope } // StringID returns the L3n4Addr as string to be used for unique identification diff --git a/vendor/github.com/cilium/cilium/pkg/lock/map.go b/vendor/github.com/cilium/cilium/pkg/lock/map.go new file mode 100644 index 0000000000..f54702a494 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/lock/map.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package lock + +import "sync" + +// Map is a thin generic wrapper around sync.Map. The sync.Map description from +// the standard library follows (and is also propagated to the corresponding +// methods) for users' convenience: +// +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type Map[K comparable, V any] sync.Map + +// MapCmpValues is an extension of Map, which additionally wraps the two extra +// methods requiring values to be also of comparable type. +type MapCmpValues[K, V comparable] Map[K, V] + +// Load returns the value stored in the map for a key, or the zero value if no +// value is present. 
The ok result indicates whether value was found in the map. +func (m *Map[K, V]) Load(key K) (value V, ok bool) { + val, ok := (*sync.Map)(m).Load(key) + return m.convert(val, ok) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadOrStore(key, value) + return val.(V), loaded +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any +// (zero value otherwise). The loaded result reports whether the key was present. +func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadAndDelete(key) + return m.convert(val, loaded) +} + +// Store sets the value for a key. +func (m *Map[K, V]) Store(key K, value V) { + (*sync.Map)(m).Store(key, value) +} + +// Swap swaps the value for a key and returns the previous value if any (zero +// value otherwise). The loaded result reports whether the key was present. +func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { + val, loaded := (*sync.Map)(m).Swap(key, value) + return m.convert(val, loaded) +} + +// Delete deletes the value for a key. +func (m *Map[K, V]) Delete(key K) { + (*sync.Map)(m).Delete(key) +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. 
+// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map[K, V]) Range(f func(key K, value V) bool) { + (*sync.Map)(m).Range(func(key, value any) bool { + return f(key.(K), value.(V)) + }) +} + +// CompareAndDelete deletes the entry for key if its value is equal to old. +// If there is no current value for key in the map, CompareAndDelete returns false +// (even if the old value is the nil interface value). +func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) { + return (*sync.Map)(m).CompareAndDelete(key, old) +} + +// CompareAndSwap swaps the old and new values for key if the value stored in +// the map is equal to old. +func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool { + return (*sync.Map)(m).CompareAndSwap(key, old, new) +} + +func (m *Map[K, V]) convert(value any, ok bool) (V, bool) { + if !ok { + return *new(V), false + } + + return value.(V), true +} diff --git a/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go new file mode 100644 index 0000000000..3b700bdfbc --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package lock + +import ( + "sort" + "sync" + "sync/atomic" + "time" +) + +// sortableMutexSeq is a global sequence counter for the creation of new +// SortableMutex's with unique sequence numbers. +var sortableMutexSeq atomic.Uint64 + +// sortableMutex implements SortableMutex. Not exported as the only way to +// initialize it is via NewSortableMutex(). 
+type sortableMutex struct { + sync.Mutex + seq uint64 + acquireDuration time.Duration +} + +func (s *sortableMutex) Lock() { + start := time.Now() + s.Mutex.Lock() + s.acquireDuration += time.Since(start) +} + +func (s *sortableMutex) Seq() uint64 { return s.seq } + +func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration } + +// SortableMutex provides a Mutex that can be globally sorted with other +// sortable mutexes. This allows deadlock-safe locking of a set of mutexes +// as it guarantees consistent lock ordering. +type SortableMutex interface { + sync.Locker + Seq() uint64 + AcquireDuration() time.Duration // The amount of time it took to acquire the lock +} + +// SortableMutexes is a set of mutexes that can be locked in a safe order. +// Once Lock() is called it must not be mutated! +type SortableMutexes []SortableMutex + +// Len implements sort.Interface. +func (s SortableMutexes) Len() int { + return len(s) +} + +// Less implements sort.Interface. +func (s SortableMutexes) Less(i int, j int) bool { + return s[i].Seq() < s[j].Seq() +} + +// Swap implements sort.Interface. +func (s SortableMutexes) Swap(i int, j int) { + s[i], s[j] = s[j], s[i] +} + +// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired, +// this will block while holding the locks with a lower sequence number. +func (s SortableMutexes) Lock() { + sort.Sort(s) + for _, mu := range s { + mu.Lock() + } +} + +// Unlock locks the sorted set of mutexes locked by prior call to Lock(). 
+func (s SortableMutexes) Unlock() { + for _, mu := range s { + mu.Unlock() + } +} + +var _ sort.Interface = SortableMutexes{} + +func NewSortableMutex() SortableMutex { + seq := sortableMutexSeq.Add(1) + return &sortableMutex{ + seq: seq, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go index 219b9c83e5..090c351afd 100644 --- a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go +++ b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go @@ -14,7 +14,7 @@ type StoppableWaitGroup struct { noopAdd chan struct{} // i is the internal counter which can store tolerate negative values // as opposed the golang's library WaitGroup. - i *int64 + i atomic.Int64 doneOnce, stopOnce sync.Once } @@ -24,7 +24,6 @@ func NewStoppableWaitGroup() *StoppableWaitGroup { return &StoppableWaitGroup{ noopDone: make(chan struct{}), noopAdd: make(chan struct{}), - i: func() *int64 { i := int64(0); return &i }(), doneOnce: sync.Once{}, stopOnce: sync.Once{}, } @@ -65,7 +64,7 @@ func (l *StoppableWaitGroup) Add() { select { case <-l.noopAdd: default: - atomic.AddInt64(l.i, 1) + l.i.Add(1) } } @@ -80,14 +79,14 @@ func (l *StoppableWaitGroup) Done() { default: select { case <-l.noopAdd: - a := atomic.AddInt64(l.i, -1) + a := l.i.Add(-1) if a <= 0 { l.doneOnce.Do(func() { close(l.noopDone) }) } default: - a := atomic.AddInt64(l.i, -1) + a := l.i.Add(-1) select { // in case the channel was close while we where in this default // case we will need to check if 'a' is less than zero and close diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go index f0bc6e1e61..1416ba904e 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go @@ -35,9 +35,15 @@ const ( // EventUUID is an event unique 
identifier EventUUID = "eventID" + // CNIAttachmentID uniquely identifies an endpoint + CNIAttachmentID = "cniAttachmentID" + // ContainerID is the container identifier ContainerID = "containerID" + // ContainerInterface is the name of the interface in the container namespace + ContainerInterface = "containerInterface" + // IdentityLabels are the labels relevant for the security identity IdentityLabels = "identityLabels" @@ -47,8 +53,8 @@ const ( // Labels are any label, they may not be relevant to the security identity. Labels = "labels" - // Source is the label or node information source - Source = "source" + // SourceFilter is the label or node information source + SourceFilter = "sourceFilter" // Controller is the name of the controller to log it. Controller = "controller" @@ -178,6 +184,9 @@ const ( // Port is a L4 port Port = "port" + // Ports is a list of L4 ports + Ports = "ports" + // PortName is a k8s ContainerPort Name PortName = "portName" @@ -214,7 +223,7 @@ const ( // NewCIDR is the new subnet/CIDR NewCIDR = "newCIDR" - // IPAddrs is a lsit of IP addrs + // IPAddrs is a list of IP addrs IPAddrs = "ipAddrs" // MTU is the maximum transmission unit of one interface @@ -332,9 +341,6 @@ const ( // BPFClockSource denotes the internal clock source (ktime vs jiffies) BPFClockSource = "bpfClockSource" - // BPFInsnSet denotes the instruction set version - BPFInsnSet = "bpfInsnSet" - // CiliumLocalRedirectPolicyName is the name of a CiliumLocalRedirectPolicy CiliumLocalRedirectName = "ciliumLocalRedirectPolicyName" @@ -650,14 +656,15 @@ const ( // WorkQueueSyncBackoff is the backoff time used by workqueues before an attempt to retry sync with k8s-apiserver. WorkQueueSyncBackOff = "workQueueSyncBackOff" - // CESSliceMode indicates the name of algorithm used to batch CEPs in a CES. 
- CESSliceMode = "ciliumEndpointSliceMode" - // SourceIP is a source IP SourceIP = "sourceIP" DestinationIP = "destinationIP" + LocalIP = "localIP" + + RemoteIP = "remoteIP" + SourceCIDR = "sourceCIDR" // DestinationCIDR is a destination CIDR @@ -732,4 +739,7 @@ const ( // State is the state of an individual component (apiserver, kvstore etc) State = "state" + + // EtcdClusterID is the ID of the etcd cluster + EtcdClusterID = "etcdClusterID" ) diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logging.go b/vendor/github.com/cilium/cilium/pkg/logging/logging.go index ccb7fb1353..91dca60bcf 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logging.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logging.go @@ -28,6 +28,7 @@ const ( FormatOpt = "format" LogFormatText LogFormat = "text" + LogFormatTextTimestamp LogFormat = "text-ts" LogFormatJSON LogFormat = "json" LogFormatJSONTimestamp LogFormat = "json-ts" @@ -35,13 +36,20 @@ const ( // we want to use (possible values: text or json) DefaultLogFormat LogFormat = LogFormatText + // DefaultLogFormatTimestamp is the string representation of the default logrus.Formatter + // including timestamps. + // We don't use this for general runtime logs since kubernetes log capture handles those. + // This is only used for applications such as CNI which is written to disk so we have no + // way to correlate with other logs. + DefaultLogFormatTimestamp LogFormat = LogFormatTextTimestamp + // DefaultLogLevel is the default log level we want to use for our logrus.Formatter DefaultLogLevel logrus.Level = logrus.InfoLevel ) // DefaultLogger is the base logrus logger. 
It is different from the logrus // default to avoid external dependencies from writing out unexpectedly -var DefaultLogger = InitializeDefaultLogger() +var DefaultLogger = initializeDefaultLogger() func initializeKLog() { log := DefaultLogger.WithField(logfields.LogSubsys, "klog") @@ -73,10 +81,11 @@ func initializeKLog() { // LogOptions maps configuration key-value pairs related to logging. type LogOptions map[string]string -// InitializeDefaultLogger returns a logrus Logger with a custom text formatter. -func InitializeDefaultLogger() (logger *logrus.Logger) { +// initializeDefaultLogger returns a logrus Logger with the default logging +// settings. +func initializeDefaultLogger() (logger *logrus.Logger) { logger = logrus.New() - logger.SetFormatter(GetFormatter(DefaultLogFormat)) + logger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp)) logger.SetLevel(DefaultLogLevel) return } @@ -103,16 +112,16 @@ func (o LogOptions) GetLogLevel() (level logrus.Level) { func (o LogOptions) GetLogFormat() LogFormat { formatOpt, ok := o[FormatOpt] if !ok { - return DefaultLogFormat + return DefaultLogFormatTimestamp } formatOpt = strings.ToLower(formatOpt) - re := regexp.MustCompile(`^(text|json|json-ts)$`) + re := regexp.MustCompile(`^(text|text-ts|json|json-ts)$`) if !re.MatchString(formatOpt) { logrus.WithError( - fmt.Errorf("incorrect log format configured '%s', expected 'text', 'json' or 'json-ts'", formatOpt), + fmt.Errorf("incorrect log format configured '%s', expected 'text', 'text-ts', 'json' or 'json-ts'", formatOpt), ).Warning("Ignoring user-configured log format") - return DefaultLogFormat + return DefaultLogFormatTimestamp } return LogFormat(formatOpt) @@ -140,7 +149,7 @@ func SetLogFormat(logFormat LogFormat) { // SetDefaultLogFormat updates the DefaultLogger with the DefaultLogFormat func SetDefaultLogFormat() { - DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormat)) + DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp)) } // AddHooks adds 
additional logrus hook to default logger @@ -201,6 +210,11 @@ func GetFormatter(format LogFormat) logrus.Formatter { DisableTimestamp: true, DisableColors: true, } + case LogFormatTextTimestamp: + return &logrus.TextFormatter{ + DisableTimestamp: false, + DisableColors: true, + } case LogFormatJSON: return &logrus.JSONFormatter{ DisableTimestamp: true, diff --git a/vendor/github.com/cilium/cilium/pkg/mac/mac.go b/vendor/github.com/cilium/cilium/pkg/mac/mac.go index f846edb4fe..1938964d72 100644 --- a/vendor/github.com/cilium/cilium/pkg/mac/mac.go +++ b/vendor/github.com/cilium/cilium/pkg/mac/mac.go @@ -107,7 +107,7 @@ func (m *MAC) UnmarshalJSON(data []byte) error { func GenerateRandMAC() (MAC, error) { buf := make([]byte, 6) if _, err := rand.Read(buf); err != nil { - return nil, fmt.Errorf("Unable to retrieve 6 rnd bytes: %s", err) + return nil, fmt.Errorf("Unable to retrieve 6 rnd bytes: %w", err) } // Set locally administered addresses bit and reset multicast bit diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go index ec5816542e..c13ca2587d 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go @@ -8,10 +8,11 @@ import ( "encoding/json" "fmt" "os/exec" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/time" ) type bpfCollector struct { diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/cell.go b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go index e48aebe005..15d35c8dc6 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/cell.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go @@ -6,7 +6,13 @@ package metrics import "github.com/cilium/cilium/pkg/hive/cell" var Cell = cell.Module("metrics", "Metrics", - cell.Invoke(NewRegistry), + // Provide registry to hive, but also invoke if case no cells decide to use as dependency + 
cell.Provide(NewRegistry), cell.Metric(NewLegacyMetrics), cell.Config(defaultRegistryConfig), + cell.Invoke(func(_ *Registry) { + // This is a hack to ensure that errors/warnings collected in the pre hive initialization + // phase are emitted as metrics. + FlushLoggingMetrics() + }), ) diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go index 7e32c4c11a..016f2bc58d 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go @@ -24,13 +24,14 @@ var ( NoOpMetric prometheus.Metric = &mockMetric{} NoOpCollector prometheus.Collector = &collector{} - NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} - NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} - NoOpObserver metricpkg.Observer = &observer{} - NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} - NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} - NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} - NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} + NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} + NoOpObserver metricpkg.Observer = &observer{} + NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} + NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} + NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} + NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpGaugeDeletableVec metricpkg.DeletableVec[metricpkg.Gauge] = &gaugeDeletableVec{gaugeVec{NoOpCollector}} ) // Metric @@ -156,6 +157,24 @@ func (g *gauge) Opts() metricpkg.Opts { return metricpkg.Opts{} } // GaugeVec +type gaugeDeletableVec struct { + gaugeVec +} + +func (*gaugeDeletableVec) Delete(ll prometheus.Labels) bool { + 
return false +} + +func (*gaugeDeletableVec) DeleteLabelValues(lvs ...string) bool { + return false +} + +func (*gaugeDeletableVec) DeletePartialMatch(labels prometheus.Labels) int { + return 0 +} + +func (*gaugeDeletableVec) Reset() {} + type gaugeVec struct { prometheus.Collector } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go index 62c368ecea..718ed44195 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go @@ -6,39 +6,54 @@ package metrics import ( "fmt" "reflect" + "sync" + "sync/atomic" "github.com/sirupsen/logrus" - "github.com/cilium/cilium/pkg/components" "github.com/cilium/cilium/pkg/logging/logfields" - "github.com/cilium/cilium/pkg/metrics/metric" ) +var ( + metricsInitialized chan struct{} = make(chan struct{}) + flushMetrics = sync.Once{} +) + +// FlushLoggingMetrics will cause all logging hook metrics accumulated prior +// to the errors_warnings metrics being registered with the Prometheus collector +// to be incremented to their respective errors_warnings metrics tuple. +func FlushLoggingMetrics() { + flushMetrics.Do(func() { + if metricsInitialized != nil { + close(metricsInitialized) + } + }) +} + // LoggingHook is a hook for logrus which counts error and warning messages as a // Prometheus metric. type LoggingHook struct { - metric metric.Vec[metric.Counter] + errs, warn atomic.Uint64 } // NewLoggingHook returns a new instance of LoggingHook for the given Cilium // component. -func NewLoggingHook(component string) *LoggingHook { - // NOTE(mrostecki): For now errors and warning metric exists only for Cilium - // daemon, but support of Prometheus metrics in some other components (i.e. - // cilium-health - GH-4268) is planned. - - // Pick a metric for the component. 
- var metric metric.Vec[metric.Counter] - switch component { - case components.CiliumAgentName: - metric = ErrorsWarnings - case components.CiliumOperatortName: - metric = ErrorsWarnings - default: - panic(fmt.Sprintf("component %s is unsupported by LoggingHook", component)) - } - - return &LoggingHook{metric: metric} +func NewLoggingHook() *LoggingHook { + lh := &LoggingHook{} + go func() { + // This channel is closed after registry is created. At this point if the errs/warnings metric + // is enabled we flush counts of errors/warnings we collected before the registry was created. + // This is a hack to ensure that errors/warnings collected in the pre hive initialization + // phase are emitted as metrics. + // Because the ErrorsWarnings metric is a counter, this means that the rate of these errors won't be + // accurate, however init errors can only happen during initialization so it probably doesn't make + // a big difference in practice. + <-metricsInitialized + metricsInitialized = nil + ErrorsWarnings.WithLabelValues(logrus.ErrorLevel.String(), "init").Add(float64(lh.errs.Load())) + ErrorsWarnings.WithLabelValues(logrus.WarnLevel.String(), "init").Add(float64(lh.warn.Load())) + }() + return lh } // Levels returns the list of logging levels on which the hook is triggered. @@ -66,8 +81,16 @@ func (h *LoggingHook) Fire(entry *logrus.Entry) error { return fmt.Errorf("type of the 'subsystem' log entry field is not string but %s", reflect.TypeOf(iSubsystem)) } + // We count errors/warnings outside of the prometheus metric. + switch entry.Level { + case logrus.ErrorLevel: + h.errs.Add(1) + case logrus.WarnLevel: + h.warn.Add(1) + } + // Increment the metric. 
- h.metric.WithLabelValues(entry.Level.String(), subsystem).Inc() + ErrorsWarnings.WithLabelValues(entry.Level.String(), subsystem).Inc() return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go new file mode 100644 index 0000000000..28c24f7f24 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package collections + +// CartesianProduct returns the cartesian product of the input vectors as +// a vector of vectors, each with length the same as the number of input vectors. +func CartesianProduct[T any](vs ...[]T) [][]T { + if len(vs) == 0 { + return [][]T{} + } + + dimension := len(vs) // Each output will be a vector of this length. + // Iterate to find out the number of output vectors. + size := len(vs[0]) + for i := 1; i < len(vs); i++ { + size *= len(vs[i]) + } + + // Allocate the output vectors. + dst := make([][]T, size) + for i := range dst { + dst[i] = make([]T, dimension) + } + + lastm := 1 + for i := 0; i < dimension; i++ { + permuteColumn[T](dst, i, lastm, vs[i]) + lastm = lastm * len(vs[i]) + } + return dst +} + +// permuteColumn fills in the nth column of the output vectors of the cartesian +// product of the input vectors. +// +// leftPermSize is the number of vectors as a result of permuting 0,..,col-1 columns. +// That is, this is the block size upon which we will repeat the values of v0 such that +// every previous permutation is again permuted with each value of v0. +// +// For ex. +// CartesianProduct[string]({"a", "b"}, {"x", "y", "z"}) +// +// Iteration (i.e. col, leftPermSize=1) 1: +// +// dst = [ +// ["a"], +// ["b"], +// ["a"] +// ["b"] +// ["a"] +// ["b"] +// ] +// +// Iteration (leftPermSize=2): +// +// dst = [ +// ["a", "x"], // <- each elem of vec is repeated leftPermSize times. 
+// ["b", "x"], +// ["a", "y"] +// ["b", "y"] +// ["a", "z"] +// ["b", "z"] +// ] +func permuteColumn[T any](dst [][]T, col int, leftPermSize int, vec []T) { + // Go down the column with the current lhs. + // You want to skip along, lastm elements at a time. + for i := 0; i < len(dst); i += leftPermSize { // So we're skipping n rows at a time, + vi := (i / leftPermSize) % len(vec) + for off := 0; off < leftPermSize; off++ { // this is a repeat + dst[i+off][col] = vec[vi] + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go index 4755a468dc..bcbf938c6e 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go @@ -60,7 +60,9 @@ func (c *counter) Add(val float64) { } } -func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] { +// NewCounterVec creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *counterVec { return &counterVec{ CounterVec: prometheus.NewCounterVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -70,12 +72,47 @@ func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] } } +// NewCounterVecWithLabels creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+// +// For example: +// +// NewCounterVecWithLabels(CounterOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewCounterVecWithLabels(opts CounterOpts, labels Labels) *counterVec { + cv := NewCounterVec(opts, labels.labelNames()) + initLabels[Counter](&cv.metric, labels, cv, opts.Disabled) + return cv +} + type counterVec struct { *prometheus.CounterVec metric } func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) { + cv.checkLabels(labels) vec, err := cv.CounterVec.CurryWith(labels) if err == nil { return &counterVec{CounterVec: vec, metric: cv.metric}, nil @@ -118,6 +155,7 @@ func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { } func (cv *counterVec) With(labels prometheus.Labels) Counter { + cv.checkLabels(labels) if !cv.enabled { return &counter{ metric: metric{enabled: false}, @@ -132,6 +170,7 @@ func (cv *counterVec) With(labels prometheus.Labels) Counter { } func (cv *counterVec) WithLabelValues(lvs ...string) Counter { + cv.checkLabelValues(lvs...) 
if !cv.enabled { return &counter{ metric: metric{enabled: false}, diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go index 445afde06d..083caef554 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go @@ -95,14 +95,51 @@ func (g *gauge) SetToCurrentTime() { } } -func NewGaugeVec(opts GaugeOpts, labelNames []string) DeletableVec[Gauge] { - return &gaugeVec{ +// NewGaugeVec creates a new DeletableVec[Gauge] based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *gaugeVec { + gv := &gaugeVec{ GaugeVec: prometheus.NewGaugeVec(opts.toPrometheus(), labelNames), metric: metric{ enabled: !opts.Disabled, opts: Opts(opts), }, } + return gv +} + +// NewGaugeVecWithLabels creates a new DeletableVec[Gauge] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+// +// For example: +// +// NewGaugeVecWithLabels(GaugeOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewGaugeVecWithLabels(opts GaugeOpts, labels Labels) *gaugeVec { + gv := NewGaugeVec(opts, labels.labelNames()) + initLabels[Gauge](&gv.metric, labels, gv, opts.Disabled) + return gv } type gaugeVec struct { @@ -111,6 +148,7 @@ type gaugeVec struct { } func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) { + gv.checkLabels(labels) vec, err := gv.GaugeVec.CurryWith(labels) if err == nil { return &gaugeVec{GaugeVec: vec, metric: gv.metric}, nil @@ -158,6 +196,7 @@ func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { metric: metric{enabled: false}, } } + gv.checkLabels(labels) promGauge := gv.GaugeVec.With(labels) return &gauge{ @@ -167,6 +206,7 @@ func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { } func (gv *gaugeVec) WithLabelValues(lvs ...string) Gauge { + gv.checkLabelValues(lvs...) if !gv.enabled { return &gauge{ metric: metric{enabled: false}, diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go index f1ddb526a2..6d499707d0 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go @@ -71,7 +71,9 @@ func (o *observer) Observe(val float64) { } } -func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { +// NewHistogramVec creates a new Vec[Observer] (i.e. 
Histogram Vec) based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *histogramVec { return &histogramVec{ ObserverVec: prometheus.NewHistogramVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -81,12 +83,28 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { } } +// NewHistogramVec creates a new Vec[Observer] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. +func NewHistogramVecWithLabels(opts HistogramOpts, labels Labels) *histogramVec { + hv := NewHistogramVec(opts, labels.labelNames()) + initLabels(&hv.metric, labels, hv, opts.Disabled) + return hv +} + type histogramVec struct { prometheus.ObserverVec metric } func (cv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], error) { + cv.checkLabels(labels) vec, err := cv.ObserverVec.CurryWith(labels) if err == nil { return &histogramVec{ObserverVec: vec, metric: cv.metric}, nil @@ -134,6 +152,7 @@ func (cv *histogramVec) With(labels prometheus.Labels) Observer { metric: metric{enabled: false}, } } + cv.checkLabels(labels) promObserver := cv.ObserverVec.With(labels) return &observer{ @@ -148,6 +167,7 @@ func (cv *histogramVec) WithLabelValues(lvs ...string) Observer { metric: metric{enabled: false}, } } + cv.checkLabelValues(lvs...) promObserver := cv.ObserverVec.WithLabelValues(lvs...) 
return &observer{ diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go index a41d079f72..580e8ff32c 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go @@ -4,9 +4,18 @@ package metric import ( + "fmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/metrics/metric/collections" ) +var logger = logrus.WithField(logfields.LogSubsys, "metric") + // WithMetadata is the interface implemented by any metric defined in this package. These typically embed existing // prometheus metric types and add additional metadata. In addition, these metrics have the concept of being enabled // or disabled which is used in place of conditional registration so all metric types can always be registered. @@ -20,6 +29,53 @@ type WithMetadata interface { type metric struct { enabled bool opts Opts + labels *labelSet +} + +// forEachLabelVector performs a product of all possible label value combinations +// and calls the provided function for each combination. +func (b *metric) forEachLabelVector(fn func(lvls []string)) { + if b.labels == nil { + return + } + var labelValues [][]string + for _, label := range b.labels.lbls { + labelValues = append(labelValues, maps.Keys(label.Values)) + } + for _, labelVector := range collections.CartesianProduct(labelValues...) { + fn(labelVector) + } +} + +// checkLabelValues checks that the provided label values are within the range +// of provided label values, if labels where defined using the Labels type. +// Violations are logged as errors for detection, but metrics should still +// be collected as is. 
+func (b *metric) checkLabelValues(lvs ...string) { + if b.labels == nil { + return + } + if err := b.labels.checkLabelValues(lvs); err != nil { + logger.WithError(err). + WithFields(logrus.Fields{ + "metric": b.opts.Name, + }). + Warning("metric label constraints violated, metric will still be collected") + } +} + +func (b *metric) checkLabels(labels prometheus.Labels) { + if b.labels == nil { + return + } + + if err := b.labels.checkLabels(labels); err != nil { + logger.WithError(err). + WithFields(logrus.Fields{ + "metric": b.opts.Name, + }). + Warning("metric label constraints violated, metric will still be collected") + } } func (b *metric) IsEnabled() bool { @@ -189,3 +245,99 @@ type Opts struct { // If true, the metric has to be explicitly enabled via config or flags Disabled bool } + +func (b Opts) GetConfigName() string { + if b.ConfigName == "" { + return prometheus.BuildFQName(b.Namespace, b.Subsystem, b.Name) + } + return b.ConfigName +} + +// Label represents a metric label with a pre-defined range of values. +// This is used with the NewxxxVecWithLabels metrics constructors to initialize +// vector metrics with known label value ranges, avoiding empty metrics. +type Label struct { + Name string + // If defined, only these values are allowed. + Values Values +} + +// Values is a distinct set of possible label values for a particular Label. +type Values map[string]struct{} + +// NewValues constructs a Values type from a set of strings. +func NewValues(vs ...string) Values { + vals := Values{} + for _, v := range vs { + vals[v] = struct{}{} + } + return vals +} + +// Labels is a slice of labels that represents a label set for a vector type +// metric. 
+type Labels []Label + +func (lbls Labels) labelNames() []string { + lns := make([]string, len(lbls)) + for i, label := range lbls { + lns[i] = label.Name + } + return lns +} + +type labelSet struct { + lbls Labels + m map[string]map[string]struct{} +} + +func (l *labelSet) namesToValues() map[string]map[string]struct{} { + if l.m != nil { + return l.m + } + l.m = make(map[string]map[string]struct{}) + for _, label := range l.lbls { + l.m[label.Name] = label.Values + } + return l.m +} + +func (l *labelSet) checkLabels(labels prometheus.Labels) error { + for name, value := range labels { + if lvs, ok := l.namesToValues()[name]; ok { + if _, ok := lvs[value]; !ok { + return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v", + name, value, maps.Keys(lvs)) + } + } else { + return fmt.Errorf("invalid label name: %s", name) + } + } + return nil +} + +func (l *labelSet) checkLabelValues(lvs []string) error { + if len(l.lbls) != len(lvs) { + return fmt.Errorf("unexpected label vector length: expected %d, got %d", len(l.lbls), len(lvs)) + } + for i, label := range l.lbls { + if _, ok := label.Values[lvs[i]]; !ok { + return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v", + label.Name, lvs[i], maps.Keys(label.Values)) + } + } + return nil +} + +// initLabels is a helper function to initialize the labels of a metric. +// It is used by xxxVecWithLabels metrics constructors to initialize the +// labels of the metric and the vector (i.e. registering all possible label value combinations). +func initLabels[T any](m *metric, labels Labels, vec Vec[T], disabled bool) { + if disabled { + return + } + m.labels = &labelSet{lbls: labels} + m.forEachLabelVector(func(vs []string) { + vec.WithLabelValues(vs...) 
+ }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go index e75db20edb..4f4b1b6e0e 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go @@ -12,7 +12,6 @@ package metrics import ( "context" - "time" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" @@ -21,6 +20,8 @@ import ( "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/metrics/metric" "github.com/cilium/cilium/pkg/promise" + "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) @@ -53,6 +54,9 @@ const ( // SubsystemK8sClient is the subsystem to scope metrics related to the kubernetes client. SubsystemK8sClient = "k8s_client" + // SubsystemWorkQueue is the subsystem to scope metrics related to the workqueue. + SubsystemWorkQueue = "k8s_workqueue" + // SubsystemKVStore is the subsystem to scope metrics related to the kvstore. SubsystemKVStore = "kvstore" @@ -79,6 +83,9 @@ const ( // Cilium KVStoreMesh CiliumKVStoreMeshNamespace = "cilium_kvstoremesh" + // CiliumOperatorNamespace is used to scope metrics from the Cilium Operator + CiliumOperatorNamespace = "cilium_operator" + // LabelError indicates the type of error (string) LabelError = "error" @@ -90,12 +97,25 @@ const ( // Labels + // LabelValueFalse is the string value for true metric label values. + LabelValueTrue = "true" + + // LabelValueFalse is the string value for false metric label values. + LabelValueFalse = "false" + // LabelValueOutcomeSuccess is used as a successful outcome of an operation LabelValueOutcomeSuccess = "success" // LabelValueOutcomeFail is used as an unsuccessful outcome of an operation LabelValueOutcomeFail = "fail" + // LabelValueOutcomeFailure is used as an unsuccessful outcome of an operation. 
+ // NOTE: This should only be used for existing metrics, new metrics should use LabelValueOutcomeFail. + LabelValueOutcomeFailure = "failure" + + // LabelDropReason is used to describe reason for dropping a packets/bytes + LabelDropReason = "reason" + // LabelEventSourceAPI marks event-related metrics that come from the API LabelEventSourceAPI = "api" @@ -136,6 +156,8 @@ const ( // LabelPolicySource is the label used to see the enforcement status LabelPolicySource = "source" + LabelSource = "source" + // LabelScope is the label used to defined multiples scopes in the same // metric. For example, one counter may measure a metric over the scope of // the entire event (scope=global), or just part of an event @@ -179,6 +201,8 @@ const ( // LabelMapName is the label for the BPF map name LabelMapName = "map_name" + LabelMapGroup = "map_group" + // LabelVersion is the label for the version number LabelVersion = "version" @@ -213,6 +237,12 @@ const ( LabelLocationRemoteIntraCluster = "remote_intra_cluster" LabelLocationRemoteInterCluster = "remote_inter_cluster" + // Rule label is a label for a L7 rule name. + LabelL7Rule = "rule" + + // LabelL7ProxyType is the label for denoting a L7 proxy type. + LabelL7ProxyType = "proxy_type" + // LabelType is the label for type in general (e.g. endpoint, node) LabelType = "type" LabelPeerEndpoint = "endpoint" @@ -227,6 +257,9 @@ const ( ) var ( + // LabelValuesBool is metric label value set for boolean type. + LabelValuesBool = metric.NewValues(LabelValueTrue, LabelValueFalse) + // Namespace is used to scope metrics from cilium. It is prepended to metric // names and separated with a '_' Namespace = CiliumAgentNamespace @@ -246,11 +279,11 @@ var ( // NodeConnectivityStatus is the connectivity status between local node to // other node intra or inter cluster. 
- NodeConnectivityStatus = NoOpGaugeVec + NodeConnectivityStatus = NoOpGaugeDeletableVec // NodeConnectivityLatency is the connectivity latency between local node to // other node intra or inter cluster. - NodeConnectivityLatency = NoOpGaugeVec + NodeConnectivityLatency = NoOpGaugeDeletableVec // Endpoint @@ -258,6 +291,9 @@ var ( // It must be thread-safe. Endpoint metric.GaugeFunc + // EndpointMaxIfindex is the maximum observed interface index for existing endpoints + EndpointMaxIfindex = NoOpGauge + // EndpointRegenerationTotal is a count of the number of times any endpoint // has been regenerated and success/fail outcome EndpointRegenerationTotal = NoOpCounterVec @@ -288,11 +324,6 @@ var ( // PolicyRevision is the current policy revision number for this agent PolicyRevision = NoOpGauge - // PolicyImportErrorsTotal is a count of failed policy imports. - // This metric was deprecated in Cilium 1.14 and is to be removed in 1.15. - // It is replaced by PolicyChangeTotal metric. - PolicyImportErrorsTotal = NoOpCounter - // PolicyChangeTotal is a count of policy changes by outcome ("success" or // "failure") PolicyChangeTotal = NoOpCounterVec @@ -309,14 +340,14 @@ var ( // CIDRGroup + // CIDRGroupsReferenced is the number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. + // CNPs with empty or non-existing CIDRGroupRefs are not considered. + CIDRGroupsReferenced = NoOpGauge + // CIDRGroupTranslationTimeStats is the time taken to translate the policy field `FromCIDRGroupRef` // after the referenced CIDRGroups have been updated or deleted. CIDRGroupTranslationTimeStats = NoOpHistogram - // CIDRGroupPolicies is the number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. 
- // CNPs with empty or non-existing CIDRGroupRefs are not considered - CIDRGroupPolicies = NoOpGauge - // Identity // Identity is the number of identities currently in use on the node by type @@ -324,11 +355,9 @@ var ( // Events - // EventTS*is the time in seconds since epoch that we last received an - // event that we will handle - // source is one of k8s, docker or apia - - // EventTS is the timestamp of k8s resource events. + // EventTS is the time in seconds since epoch that we last received an + // event that was handled by Cilium. This metric tracks the source of the + // event which can be one of K8s or Cilium's API. EventTS = NoOpGaugeVec // EventLagK8s is the lag calculation for k8s Pod events. @@ -342,22 +371,6 @@ var ( // ProxyPolicyL7Total is a count of all l7 requests handled by proxy ProxyPolicyL7Total = NoOpCounterVec - // ProxyParseErrors is a count of failed parse errors on proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyParseErrors = NoOpCounter - - // ProxyForwarded is a count of all forwarded requests by proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyForwarded = NoOpCounter - - // ProxyDenied is a count of all denied requests by policy by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyDenied = NoOpCounter - - // ProxyReceived is a count of all received requests by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyReceived = NoOpCounter - // ProxyUpstreamTime is how long the upstream server took to reply labeled // by error, protocol and span time ProxyUpstreamTime = NoOpObserverVec @@ -368,22 +381,6 @@ var ( // L3-L4 statistics - // DropCount is the total drop requests, - // tagged by drop reason and direction(ingress/egress) - DropCount = NoOpCounterVec - - // DropBytes is the total dropped bytes, - // tagged by drop reason and direction(ingress/egress) - DropBytes = NoOpCounterVec - - // ForwardCount is the total forwarded packets, - // tagged by ingress/egress direction - 
ForwardCount = NoOpCounterVec - - // ForwardBytes is the total forwarded bytes, - // tagged by ingress/egress direction - ForwardBytes = NoOpCounterVec - // Datapath statistics // ConntrackGCRuns is the number of times that the conntrack GC @@ -461,9 +458,14 @@ var ( // IPAM events - // IpamEvent is the number of IPAM events received labeled by action and + // IPAMEvent is the number of IPAM events received labeled by action and // datapath family type - IpamEvent = NoOpCounterVec + IPAMEvent = NoOpCounterVec + + // IPAMCapacity tracks the total number of IPs that could be allocated. To + // get the current number of available IPs, it would be this metric + // subtracted by IPAMEvent{allocated}. + IPAMCapacity = NoOpGaugeVec // KVstore events @@ -477,14 +479,6 @@ var ( // KVStoreQuorumErrors records the number of kvstore quorum errors KVStoreQuorumErrors = NoOpCounterVec - // KVStoreSyncQueueSize records the number of elements queued for - // synchronization in the kvstore. - KVStoreSyncQueueSize = NoOpGaugeVec - - // KVStoreInitialSyncCompleted records whether the initial synchronization - // from/to the kvstore has completed. - KVStoreInitialSyncCompleted = NoOpGaugeVec - // FQDNGarbageCollectorCleanedTotal is the number of domains cleaned by the // GC job. FQDNGarbageCollectorCleanedTotal = NoOpCounter @@ -523,6 +517,9 @@ var ( // bpf map. BPFMapOps = NoOpCounterVec + // BPFMapCapacity is the max capacity of bpf maps, labelled by map group classification. 
+ BPFMapCapacity = NoOpGaugeVec + // TriggerPolicyUpdateTotal is the metric to count total number of // policy update triggers TriggerPolicyUpdateTotal = NoOpCounterVec @@ -565,14 +562,89 @@ var ( // APILimiterProcessedRequests is the counter of the number of // processed (successful and failed) requests APILimiterProcessedRequests = NoOpCounterVec + + // WorkQueueDepth is the depth of the workqueue + // + // We set actual metrics here instead of NoOp for the workqueue metrics + // because these metrics will be registered with workqueue.SetProvider + // by init function in watcher.go. Otherwise, we will register NoOps. + // + WorkQueueDepth = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_depth", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "depth", + Help: "Current depth of workqueue.", + }, []string{"name"}) + + // WorkQueueAddsTotal is the total number of adds to the workqueue + WorkQueueAddsTotal = metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_adds_total", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "adds_total", + Help: "Total number of adds handled by workqueue.", + }, []string{"name"}) + + // WorkQueueLatency is the latency of how long an item stays in the workqueue + WorkQueueLatency = metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_queue_duration_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "queue_duration_seconds", + Help: "How long in seconds an item stays in workqueue before being requested.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + // WorkQueueDuration is the duration of how long processing an item for the workqueue + WorkQueueDuration = metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_work_duration_seconds", + Namespace: Namespace, + Subsystem: 
SubsystemWorkQueue, + Name: "work_duration_seconds", + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + // WorkQueueUnfinishedWork is how many seconds of work has been done that is in progress + WorkQueueUnfinishedWork = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_unfinished_work_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "unfinished_work_seconds", + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + // WorkQueueLongestRunningProcessor is the longest running processor in the workqueue + WorkQueueLongestRunningProcessor = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_longest_running_processor_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "longest_running_processor_seconds", + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + // WorkQueueRetries is the number of retries for handled by the workqueue + WorkQueueRetries = metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_retries_total", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "retries_total", + Help: "Total number of retries handled by workqueue.", + }, []string{"name"}) ) type LegacyMetrics struct { BootstrapTimes metric.Vec[metric.Observer] APIInteractions metric.Vec[metric.Observer] - NodeConnectivityStatus metric.Vec[metric.Gauge] - NodeConnectivityLatency metric.Vec[metric.Gauge] + NodeConnectivityStatus metric.DeletableVec[metric.Gauge] + NodeConnectivityLatency 
metric.DeletableVec[metric.Gauge] Endpoint metric.GaugeFunc + EndpointMaxIfindex metric.Gauge EndpointRegenerationTotal metric.Vec[metric.Counter] EndpointStateCount metric.Vec[metric.Gauge] EndpointRegenerationTimeStats metric.Vec[metric.Observer] @@ -581,27 +653,18 @@ type LegacyMetrics struct { PolicyRegenerationCount metric.Counter PolicyRegenerationTimeStats metric.Vec[metric.Observer] PolicyRevision metric.Gauge - PolicyImportErrorsTotal metric.Counter PolicyChangeTotal metric.Vec[metric.Counter] PolicyEndpointStatus metric.Vec[metric.Gauge] PolicyImplementationDelay metric.Vec[metric.Observer] + CIDRGroupsReferenced metric.Gauge CIDRGroupTranslationTimeStats metric.Histogram - CIDRGroupPolicies metric.Gauge Identity metric.Vec[metric.Gauge] EventTS metric.Vec[metric.Gauge] EventLagK8s metric.Gauge ProxyRedirects metric.Vec[metric.Gauge] ProxyPolicyL7Total metric.Vec[metric.Counter] - ProxyParseErrors metric.Counter - ProxyForwarded metric.Counter - ProxyDenied metric.Counter - ProxyReceived metric.Counter ProxyUpstreamTime metric.Vec[metric.Observer] ProxyDatapathUpdateTimeout metric.Counter - DropCount metric.Vec[metric.Counter] - DropBytes metric.Vec[metric.Counter] - ForwardCount metric.Vec[metric.Counter] - ForwardBytes metric.Vec[metric.Counter] ConntrackGCRuns metric.Vec[metric.Counter] ConntrackGCKeyFallbacks metric.Vec[metric.Counter] ConntrackGCSize metric.Vec[metric.Gauge] @@ -621,12 +684,11 @@ type LegacyMetrics struct { KubernetesAPICallsTotal metric.Vec[metric.Counter] KubernetesCNPStatusCompletion metric.Vec[metric.Observer] TerminatingEndpointsEvents metric.Counter - IpamEvent metric.Vec[metric.Counter] + IPAMEvent metric.Vec[metric.Counter] + IPAMCapacity metric.Vec[metric.Gauge] KVStoreOperationsDuration metric.Vec[metric.Observer] KVStoreEventsQueueDuration metric.Vec[metric.Observer] KVStoreQuorumErrors metric.Vec[metric.Counter] - KVStoreSyncQueueSize metric.Vec[metric.Gauge] - KVStoreInitialSyncCompleted metric.Vec[metric.Gauge] 
FQDNGarbageCollectorCleanedTotal metric.Counter FQDNActiveNames metric.Vec[metric.Gauge] FQDNActiveIPs metric.Vec[metric.Gauge] @@ -636,6 +698,7 @@ type LegacyMetrics struct { IPCacheEventsTotal metric.Vec[metric.Counter] BPFSyscallDuration metric.Vec[metric.Observer] BPFMapOps metric.Vec[metric.Counter] + BPFMapCapacity metric.Vec[metric.Gauge] TriggerPolicyUpdateTotal metric.Vec[metric.Counter] TriggerPolicyUpdateFolds metric.Gauge TriggerPolicyUpdateCallDuration metric.Vec[metric.Observer] @@ -647,6 +710,13 @@ type LegacyMetrics struct { APILimiterRateLimit metric.Vec[metric.Gauge] APILimiterAdjustmentFactor metric.Vec[metric.Gauge] APILimiterProcessedRequests metric.Vec[metric.Counter] + WorkQueueDepth metric.Vec[metric.Gauge] + WorkQueueAddsTotal metric.Vec[metric.Counter] + WorkQueueLatency metric.Vec[metric.Observer] + WorkQueueDuration metric.Vec[metric.Observer] + WorkQueueUnfinishedWork metric.Vec[metric.Gauge] + WorkQueueLongestRunningProcessor metric.Vec[metric.Gauge] + WorkQueueRetries metric.Vec[metric.Counter] } func NewLegacyMetrics() *LegacyMetrics { @@ -668,13 +738,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Duration of processed API calls labeled by path, method and return code.", }, []string{LabelPath, LabelMethod, LabelAPIReturnCode}), - EndpointRegenerationTotal: metric.NewCounterVec(metric.CounterOpts{ + EndpointRegenerationTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_endpoint_regenerations_total", Namespace: Namespace, Name: "endpoint_regenerations_total", Help: "Count of all endpoint regenerations that have completed, tagged by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), EndpointStateCount: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_endpoint_state", @@ -721,20 +796,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Highest policy revision 
number in the agent", }), - PolicyImportErrorsTotal: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_import_errors_total", - Namespace: Namespace, - Name: "policy_import_errors_total", - Help: "Number of times a policy import has failed", - }), - - PolicyChangeTotal: metric.NewCounterVec(metric.CounterOpts{ + PolicyChangeTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_change_total", Namespace: Namespace, Name: "policy_change_total", Help: "Number of policy changes by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), PolicyEndpointStatus: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_policy_endpoint_enforcement_status", @@ -744,29 +817,34 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of endpoints labeled by policy enforcement status", }, []string{LabelPolicyEnforcement}), - PolicyImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{ + PolicyImplementationDelay: metric.NewHistogramVecWithLabels(metric.HistogramOpts{ ConfigName: Namespace + "_policy_implementation_delay", Namespace: Namespace, Name: "policy_implementation_delay", Help: "Time between a policy change and it being fully deployed into the datapath", - }, []string{LabelPolicySource}), + }, metric.Labels{ + { + Name: LabelPolicySource, + Values: metric.NewValues(string(source.Kubernetes), string(source.CustomResource), string(source.LocalAPI)), + }, + }), - CIDRGroupTranslationTimeStats: metric.NewHistogram(metric.HistogramOpts{ - ConfigName: Namespace + "_cidrgroup_translation_time_stats_seconds", - Disabled: true, + CIDRGroupsReferenced: metric.NewGauge(metric.GaugeOpts{ + ConfigName: Namespace + "cidrgroups_referenced", Namespace: Namespace, - Name: "cidrgroup_translation_time_stats_seconds", - Help: "CIDRGroup translation time stats", + Name: "cidrgroups_referenced", + Help: 
"Number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. CNPs with empty or non-existing CIDRGroupRefs are not considered", }), - CIDRGroupPolicies: metric.NewGauge(metric.GaugeOpts{ - ConfigName: Namespace + "_cidrgroup_policies", + CIDRGroupTranslationTimeStats: metric.NewHistogram(metric.HistogramOpts{ + ConfigName: Namespace + "cidrgroup_translation_time_stats_seconds", + Disabled: true, Namespace: Namespace, - Name: "cidrgroup_policies", - Help: "Number of CNPs and CCNPs referencing at least one CiliumCIDRGroup", + Name: "cidrgroup_translation_time_stats_seconds", + Help: "CIDRGroup translation time stats", }), Identity: metric.NewGaugeVec(metric.GaugeOpts{ @@ -781,7 +859,7 @@ func NewLegacyMetrics() *LegacyMetrics { ConfigName: Namespace + "_event_ts", Namespace: Namespace, Name: "event_ts", - Help: "Last timestamp when we received an event", + Help: "Last timestamp when Cilium received an event from a control plane source, per resource and per action", }, []string{LabelEventSource, LabelScope, LabelAction}), EventLagK8s: metric.NewGauge(metric.GaugeOpts{ @@ -801,41 +879,20 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of redirects installed for endpoints, labeled by protocol", }, []string{LabelProtocolL7}), - ProxyPolicyL7Total: metric.NewCounterVec(metric.CounterOpts{ + ProxyPolicyL7Total: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_l7_total", - - Namespace: Namespace, - Name: "policy_l7_total", - Help: "Number of total proxy requests handled", - }, []string{"rule"}), - - ProxyParseErrors: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_parse_errors_total", - Namespace: Namespace, - Name: "policy_l7_parse_errors_total", - Help: "Number of total L7 parse errors", - }), - - ProxyForwarded: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_forwarded_total", Namespace: Namespace, - Name: "policy_l7_forwarded_total", - Help: "Number of total L7 
forwarded requests/responses", - }), - - ProxyDenied: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_denied_total", - Namespace: Namespace, - Name: "policy_l7_denied_total", - Help: "Number of total L7 denied requests/responses due to policy", - }), - - ProxyReceived: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_received_total", - - Namespace: Namespace, - Name: "policy_l7_received_total", - Help: "Number of total L7 received requests/responses", + Name: "policy_l7_total", + Help: "Number of total proxy requests handled", + }, metric.Labels{ + { + Name: LabelL7Rule, + Values: metric.NewValues("received", "forwarded", "denied", "parse_errors"), + }, + { + Name: LabelL7ProxyType, + Values: metric.NewValues("fqdn", "envoy"), + }, }), ProxyUpstreamTime: metric.NewHistogramVec(metric.HistogramOpts{ @@ -854,38 +911,6 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of total datapath update timeouts due to FQDN IP updates", }), - DropCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_count_total", - Namespace: Namespace, - Name: "drop_count_total", - Help: "Total dropped packets, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - DropBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_bytes_total", - Namespace: Namespace, - Name: "drop_bytes_total", - Help: "Total dropped bytes, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - ForwardCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_count_total", - Namespace: Namespace, - Name: "forward_count_total", - Help: "Total forwarded packets, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - - ForwardBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_bytes_total", - Namespace: Namespace, - Name: 
"forward_bytes_total", - Help: "Total forwarded bytes, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - ConntrackGCRuns: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total", Namespace: Namespace, @@ -956,12 +981,7 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of services events labeled by action type", }, []string{LabelAction}), - ErrorsWarnings: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_errors_warnings_total", - Namespace: Namespace, - Name: "errors_warnings_total", - Help: "Number of total errors in cilium-agent instances", - }, []string{"level", "subsystem"}), + ErrorsWarnings: newErrorsWarningsMetric(), ControllerRuns: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_controllers_runs_total", @@ -1039,13 +1059,20 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of terminating endpoint events received from Kubernetes", }), - IpamEvent: metric.NewCounterVec(metric.CounterOpts{ + IPAMEvent: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_ipam_events_total", Namespace: Namespace, Name: "ipam_events_total", Help: "Number of IPAM events received labeled by action and datapath family type", }, []string{LabelAction, LabelDatapathFamily}), + IPAMCapacity: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_ipam_capacity", + Namespace: Namespace, + Name: "ipam_capacity", + Help: "Total number of IPs in the IPAM pool labeled by family", + }, []string{LabelDatapathFamily}), + KVStoreOperationsDuration: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds", Namespace: Namespace, @@ -1071,22 +1098,6 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of quorum errors", }, []string{LabelError}), - KVStoreSyncQueueSize: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: Namespace + "_" + SubsystemKVStore + 
"_sync_queue_size", - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "sync_queue_size", - Help: "Number of elements queued for synchronization in the kvstore", - }, []string{LabelScope, LabelSourceCluster}), - - KVStoreInitialSyncCompleted: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: Namespace + "_" + SubsystemKVStore + "_initial_sync_completed", - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "initial_sync_completed", - Help: "Whether the initial synchronization from/to the kvstore has completed", - }, []string{LabelScope, LabelSourceCluster, LabelAction}), - IPCacheErrorsTotal: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemIPCache + "_errors_total", Namespace: Namespace, @@ -1165,6 +1176,14 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Total operations on map, tagged by map name", }, []string{LabelMapName, LabelOperation, LabelOutcome}), + BPFMapCapacity: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemBPF + "_map_capacity", + Namespace: Namespace, + Subsystem: SubsystemBPF, + Name: "map_capacity", + Help: "Capacity of map, tagged by map group. 
All maps with a capacity of 65536 are grouped under 'default'", + }, []string{LabelMapGroup}), + TriggerPolicyUpdateTotal: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_total", Namespace: Namespace, @@ -1251,7 +1270,7 @@ func NewLegacyMetrics() *LegacyMetrics { Subsystem: SubsystemAPILimiter, Name: "processed_requests_total", Help: "Total number of API requests processed", - }, []string{"api_call", LabelOutcome}), + }, []string{"api_call", LabelOutcome, LabelAPIReturnCode}), EndpointPropagationDelay: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_endpoint_propagation_delay_seconds", @@ -1291,16 +1310,35 @@ func NewLegacyMetrics() *LegacyMetrics { LabelProtocol, LabelAddressType, }), + + WorkQueueDepth: WorkQueueDepth, + WorkQueueAddsTotal: WorkQueueAddsTotal, + WorkQueueLatency: WorkQueueLatency, + WorkQueueDuration: WorkQueueDuration, + WorkQueueUnfinishedWork: WorkQueueUnfinishedWork, + WorkQueueLongestRunningProcessor: WorkQueueLongestRunningProcessor, + WorkQueueRetries: WorkQueueRetries, + } + + ifindexOpts := metric.GaugeOpts{ + ConfigName: Namespace + "_endpoint_max_ifindex", + Disabled: !enableIfIndexMetric(), + Namespace: Namespace, + Name: "endpoint_max_ifindex", + Help: "Maximum interface index observed for existing endpoints", } + lm.EndpointMaxIfindex = metric.NewGauge(ifindexOpts) v := version.GetCiliumVersion() lm.VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch) + lm.BPFMapCapacity.WithLabelValues("default").Set(DefaultMapCapacity) BootstrapTimes = lm.BootstrapTimes APIInteractions = lm.APIInteractions NodeConnectivityStatus = lm.NodeConnectivityStatus NodeConnectivityLatency = lm.NodeConnectivityLatency Endpoint = lm.Endpoint + EndpointMaxIfindex = lm.EndpointMaxIfindex EndpointRegenerationTotal = lm.EndpointRegenerationTotal EndpointStateCount = lm.EndpointStateCount EndpointRegenerationTimeStats = lm.EndpointRegenerationTimeStats @@ -1309,27 +1347,18 
@@ func NewLegacyMetrics() *LegacyMetrics { PolicyRegenerationCount = lm.PolicyRegenerationCount PolicyRegenerationTimeStats = lm.PolicyRegenerationTimeStats PolicyRevision = lm.PolicyRevision - PolicyImportErrorsTotal = lm.PolicyImportErrorsTotal PolicyChangeTotal = lm.PolicyChangeTotal PolicyEndpointStatus = lm.PolicyEndpointStatus PolicyImplementationDelay = lm.PolicyImplementationDelay + CIDRGroupsReferenced = lm.CIDRGroupsReferenced CIDRGroupTranslationTimeStats = lm.CIDRGroupTranslationTimeStats - CIDRGroupPolicies = lm.CIDRGroupPolicies Identity = lm.Identity EventTS = lm.EventTS EventLagK8s = lm.EventLagK8s ProxyRedirects = lm.ProxyRedirects ProxyPolicyL7Total = lm.ProxyPolicyL7Total - ProxyParseErrors = lm.ProxyParseErrors - ProxyForwarded = lm.ProxyForwarded - ProxyDenied = lm.ProxyDenied - ProxyReceived = lm.ProxyReceived ProxyUpstreamTime = lm.ProxyUpstreamTime ProxyDatapathUpdateTimeout = lm.ProxyDatapathUpdateTimeout - DropCount = lm.DropCount - DropBytes = lm.DropBytes - ForwardCount = lm.ForwardCount - ForwardBytes = lm.ForwardBytes ConntrackGCRuns = lm.ConntrackGCRuns ConntrackGCKeyFallbacks = lm.ConntrackGCKeyFallbacks ConntrackGCSize = lm.ConntrackGCSize @@ -1349,12 +1378,11 @@ func NewLegacyMetrics() *LegacyMetrics { KubernetesAPICallsTotal = lm.KubernetesAPICallsTotal KubernetesCNPStatusCompletion = lm.KubernetesCNPStatusCompletion TerminatingEndpointsEvents = lm.TerminatingEndpointsEvents - IpamEvent = lm.IpamEvent + IPAMEvent = lm.IPAMEvent + IPAMCapacity = lm.IPAMCapacity KVStoreOperationsDuration = lm.KVStoreOperationsDuration KVStoreEventsQueueDuration = lm.KVStoreEventsQueueDuration KVStoreQuorumErrors = lm.KVStoreQuorumErrors - KVStoreSyncQueueSize = lm.KVStoreSyncQueueSize - KVStoreInitialSyncCompleted = lm.KVStoreInitialSyncCompleted FQDNGarbageCollectorCleanedTotal = lm.FQDNGarbageCollectorCleanedTotal FQDNActiveNames = lm.FQDNActiveNames FQDNActiveIPs = lm.FQDNActiveIPs @@ -1364,6 +1392,7 @@ func NewLegacyMetrics() *LegacyMetrics { 
IPCacheEventsTotal = lm.IPCacheEventsTotal BPFSyscallDuration = lm.BPFSyscallDuration BPFMapOps = lm.BPFMapOps + BPFMapCapacity = lm.BPFMapCapacity TriggerPolicyUpdateTotal = lm.TriggerPolicyUpdateTotal TriggerPolicyUpdateFolds = lm.TriggerPolicyUpdateFolds TriggerPolicyUpdateCallDuration = lm.TriggerPolicyUpdateCallDuration @@ -1379,6 +1408,20 @@ func NewLegacyMetrics() *LegacyMetrics { return lm } +// InitOperatorMetrics is used to init legacy metrics necessary during operator init. +func InitOperatorMetrics() { + ErrorsWarnings = newErrorsWarningsMetric() +} + +func newErrorsWarningsMetric() metric.Vec[metric.Counter] { + return metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_errors_warnings_total", + Namespace: Namespace, + Name: "errors_warnings_total", + Help: "Number of total errors in cilium-agent instances", + }, []string{"level", "subsystem"}) +} + // GaugeWithThreshold is a prometheus gauge that registers itself with // prometheus if over a threshold value and unregisters when under. type GaugeWithThreshold struct { @@ -1442,22 +1485,15 @@ func Reinitialize() { } } -// MustRegister adds the collector to the registry, exposing this metric to -// prometheus scrapes. -// It will panic on error. -func MustRegister(c ...prometheus.Collector) { - withRegistry(func(reg *Registry) { - reg.MustRegister(c...) - }) -} - // Register registers a collector func Register(c prometheus.Collector) error { + var err error + withRegistry(func(reg *Registry) { - reg.Register(c) + err = reg.Register(c) }) - return nil + return err } // RegisterList registers a list of collectors. 
If registration of one @@ -1547,9 +1583,30 @@ func Error2Outcome(err error) string { return LabelValueOutcomeSuccess } +// LabelOutcome2Code converts a label outcome to a code +func LabelOutcome2Code(outcome string) int { + if outcome == LabelValueOutcomeSuccess { + return 200 + } + return 500 +} + func BoolToFloat64(v bool) float64 { if v { return 1 } return 0 } + +// In general, most bpf maps are allocated to occupy a 16-bit key size. +// To reduce the number of metrics that need to be emitted for map capacity, +// we assume a default map size of 2^16 entries for all maps, which can be +// assumed unless specified otherwise. +const DefaultMapCapacity = 65536 + +func UpdateMapCapacity(groupName string, capacity uint32) { + if capacity == 0 || capacity == DefaultMapCapacity { + return + } + BPFMapCapacity.WithLabelValues(groupName).Set(float64(capacity)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go index 9521c6d1ac..184d3baa71 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go @@ -5,7 +5,11 @@ package metrics -import "golang.org/x/sys/unix" +import ( + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/datapath/linux/probes" +) // Errno2Outcome converts a unix.Errno to LabelOutcome func Errno2Outcome(errno unix.Errno) string { @@ -15,3 +19,12 @@ func Errno2Outcome(errno unix.Errno) string { return LabelValueOutcomeSuccess } + +func enableIfIndexMetric() bool { + // On kernels which do not provide ifindex via the FIB, Cilium needs + // to store it in the CT map, with a field limit of max(uint16). + // The EndpointMaxIfindex metric can be used to determine if that + // limit is approaching. However, it should only be enabled by + // default if we observe that the FIB is not providing the ifindex. 
+ return probes.HaveFibIfindex() != nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go new file mode 100644 index 0000000000..dc4333ab32 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +func enableIfIndexMetric() bool { + return false +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/registry.go b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go index 6bec5a3b12..c6a820f168 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/registry.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go @@ -24,7 +24,7 @@ import ( ) var defaultRegistryConfig = RegistryConfig{ - PrometheusServeAddr: ":9962", + PrometheusServeAddr: "", } type RegistryConfig struct { @@ -45,7 +45,7 @@ type RegistryParams struct { Logger logrus.FieldLogger Shutdowner hive.Shutdowner - Lifecycle hive.Lifecycle + Lifecycle cell.Lifecycle AutoMetrics []metricpkg.WithMetadata `group:"hive-metrics"` Config RegistryConfig @@ -82,8 +82,8 @@ func NewRegistry(params RegistryParams) *Registry { Handler: mux, } - params.Lifecycle.Append(hive.Hook{ - OnStart: func(hc hive.HookContext) error { + params.Lifecycle.Append(cell.Hook{ + OnStart: func(hc cell.HookContext) error { go func() { params.Logger.Infof("Serving prometheus metrics on %s", params.Config.PrometheusServeAddr) err := srv.ListenAndServe() @@ -93,7 +93,7 @@ func NewRegistry(params RegistryParams) *Registry { }() return nil }, - OnStop: func(hc hive.HookContext) error { + OnStop: func(hc cell.HookContext) error { return srv.Shutdown(hc) }, }) @@ -130,7 +130,7 @@ func (r *Registry) Reinitialize() { metrics := make(map[string]metricpkg.WithMetadata) for i, autoMetric := range r.params.AutoMetrics { - metrics[autoMetric.Opts().ConfigName] = r.params.AutoMetrics[i] + 
metrics[autoMetric.Opts().GetConfigName()] = r.params.AutoMetrics[i] } // This is a bodge for a very specific feature, inherited from the old `Daemon.additionalMetrics`. diff --git a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go index f75d584727..345786de3f 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go +++ b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go @@ -3,9 +3,13 @@ package addressing +import ( + "net" +) + // AddressType represents a type of IP address for a node. They are copied // from k8s.io/api/core/v1/types.go to avoid pulling in a lot of Kubernetes -// imports into this package.s +// imports into this package. type AddressType string const ( @@ -16,3 +20,46 @@ const ( NodeInternalDNS AddressType = "InternalDNS" NodeCiliumInternalIP AddressType = "CiliumInternalIP" ) + +type Address interface { + AddrType() AddressType + ToString() string +} + +// ExtractNodeIP returns one of the provided IP addresses available with the following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if ExtractNodeIP fails to get an IP based on the provided address family. 
+func ExtractNodeIP[T Address](addrs []T, ipv6 bool) net.IP { + var backupIP net.IP + for _, addr := range addrs { + parsed := net.ParseIP(addr.ToString()) + if parsed == nil { + continue + } + if (ipv6 && parsed.To4() != nil) || + (!ipv6 && parsed.To4() == nil) { + continue + } + switch addr.AddrType() { + // Ignore CiliumInternalIPs + case NodeCiliumInternalIP: + continue + // Always prefer a cluster internal IP + case NodeInternalIP: + return parsed + case NodeExternalIP: + // Fall back to external Node IP + // if no internal IP could be found + backupIP = parsed + default: + // As a last resort, if no internal or external + // IP was found, use any node address available + if backupIP == nil { + backupIP = parsed + } + } + } + return backupIP +} diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index 6f94733c6f..c892fe9b0f 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -8,24 +8,27 @@ import ( "encoding/json" "errors" "fmt" + "io" "math" "net" "net/netip" "os" "path/filepath" + "regexp" "runtime" "sort" "strconv" "strings" - "time" "github.com/shirou/gopsutil/v3/mem" "github.com/sirupsen/logrus" "github.com/spf13/cast" "github.com/spf13/cobra" "github.com/spf13/viper" + "google.golang.org/protobuf/types/known/fieldmaskpb" k8sLabels "k8s.io/apimachinery/pkg/labels" + flowpb "github.com/cilium/cilium/api/v1/flow" "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/cidr" clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types" @@ -37,6 +40,7 @@ import ( "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) @@ -107,6 +111,9 @@ const ( // ConntrackGCInterval is the name of the ConntrackGCInterval option ConntrackGCInterval = 
"conntrack-gc-interval" + // ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option + ConntrackGCMaxInterval = "conntrack-gc-max-interval" + // DebugArg is the argument enables debugging mode DebugArg = "debug" @@ -120,10 +127,6 @@ const ( // direct routing mode (only required by BPF NodePort) DirectRoutingDevice = "direct-routing-device" - // LBDevInheritIPAddr is device name which IP addr is inherited by devices - // running BPF loadbalancer program - LBDevInheritIPAddr = "bpf-lb-dev-ip-addr-inherit" - // DisableEnvoyVersionCheck do not perform Envoy binary version check on startup DisableEnvoyVersionCheck = "disable-envoy-version-check" @@ -133,9 +136,6 @@ const ( // EnableExternalIPs enables implementation of k8s services with externalIPs in datapath EnableExternalIPs = "enable-external-ips" - // K8sEnableEndpointSlice enables the k8s EndpointSlice feature into Cilium - K8sEnableEndpointSlice = "enable-k8s-endpoint-slice" - // EnableL7Proxy is the name of the option to enable L7 proxy EnableL7Proxy = "enable-l7-proxy" @@ -170,6 +170,10 @@ const ( // for the connection from proxy to upstream cluster ProxyIdleTimeout = "proxy-idle-timeout-seconds" + // RestoredProxyPortsAgeLimit specifies the time after which a restored proxy ports file is + // considered stale (in minutes) + RestoredProxyPortsAgeLimit = "restored-proxy-ports-age-limit" + // FixedIdentityMapping is the key-value for the fixed identity mapping // which allows to use reserved label for fixed identities FixedIdentityMapping = "fixed-identity-mapping" @@ -257,7 +261,7 @@ const ( NodePortAlg = "node-port-algorithm" // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration = "node-port-acceleration" // Alias to NodePortMode @@ -305,8 +309,6 @@ const ( // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity = 
"enable-session-affinity" - EnableServiceTopology = "enable-service-topology" - // EnableIdentityMark enables setting the mark field with the identity for // local traffic. This may be disabled if chaining modes and Cilium use // conflicting marks. @@ -321,12 +323,6 @@ const ( // considered local ones with HOST_ID in the ipcache AddressScopeMax = "local-max-addr-scope" - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager = "enable-bandwidth-manager" - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR = "enable-bbr" - // EnableRecorder enables the datapath pcap recorder EnableRecorder = "enable-recorder" @@ -357,18 +353,15 @@ const ( // EnableIPv6Masquerade masquerades IPv6 packets from endpoints leaving the host. EnableIPv6Masquerade = "enable-ipv6-masquerade" - // EnableIPv6BIGTCP enables IPv6 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv6BIGTCP = "enable-ipv6-big-tcp" - - // EnableIPv4BIGTCP enables IPv4 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv4BIGTCP = "enable-ipv4-big-tcp" - // EnableBPFClockProbe selects a more efficient source clock (jiffies vs ktime) EnableBPFClockProbe = "enable-bpf-clock-probe" // EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables EnableBPFMasquerade = "enable-bpf-masquerade" + // EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one + EnableMasqueradeRouteSource = "enable-masquerade-to-route-source" + // DeriveMasqIPAddrFromDevice is device name which IP addr is used for BPF masquerades DeriveMasqIPAddrFromDevice = "derive-masquerade-ip-addr-from-device" @@ -400,10 +393,9 @@ const ( // to skip netfilter connection tracking on all pod traffic. 
InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules" - IPTablesLockTimeout = "iptables-lock-timeout" - - // IPTablesRandomFully sets iptables flag random-fully on masquerading rules - IPTablesRandomFully = "iptables-random-fully" + // ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve + // the provided comma-separated list of ports in the container network namespace + ContainerIPLocalReservedPorts = "container-ip-local-reserved-ports" // IPv6NodeAddr is the IPv6 address of node IPv6NodeAddr = "ipv6-node" @@ -494,6 +486,17 @@ const ( // DNSProxyLockCount. DNSProxyLockTimeout = "dnsproxy-lock-timeout" + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout = "dnsproxy-socket-linger-timeout" + + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. + DNSProxyEnableTransparentMode = "dnsproxy-enable-transparent-mode" + + // DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users + // to disable transparent mode even if IPSec is enabled + DNSProxyInsecureSkipTransparentModeCheck = "dnsproxy-insecure-skip-transparent-mode-check" + // MTUName is the name of the MTU option MTUName = "mtu" @@ -512,25 +515,24 @@ const ( // BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only" - // TunnelName is the name of the Tunnel option - TunnelName = "tunnel" - // RoutingMode is the name of the option to choose between native routing and tunneling mode RoutingMode = "routing-mode" - // TunnelProtocol is the name of the option to select the tunneling protocol - TunnelProtocol = "tunnel-protocol" + // ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services + // without any backends + ServiceNoBackendResponse = "service-no-backend-response" - // TunnelPortName is the name of the 
TunnelPort option - TunnelPortName = "tunnel-port" + // ServiceNoBackendResponseReject is the name of the option to reject traffic for services + // without any backends + ServiceNoBackendResponseReject = "reject" - // SingleClusterRouteName is the name of the SingleClusterRoute option - // - // SingleClusterRoute enables use of a single route covering the entire - // cluster CIDR to point to the cilium_host interface instead of using - // a separate route for each cluster node CIDR. This option is not - // compatible with Tunnel=TunnelDisabled - SingleClusterRouteName = "single-cluster-route" + // ServiceNoBackendResponseDrop is the name of the option to drop traffic for services + // without any backends + ServiceNoBackendResponseDrop = "drop" + + // MaxInternalTimerDelay sets a maximum on all periodic timers in + // the agent in order to flush out timer-related bugs in the agent. + MaxInternalTimerDelay = "max-internal-timer-delay" // MonitorAggregationName specifies the MonitorAggregationLevel on the // comandline. @@ -545,12 +547,6 @@ const ( // ciliumEnvPrefix is the prefix used for environment variables ciliumEnvPrefix = "CILIUM_" - // ClusterName is the name of the ClusterName option - ClusterName = "cluster-name" - - // ClusterIDName is the name of the ClusterID option - ClusterIDName = "cluster-id" - // CNIChainingMode configures which CNI plugin Cilium is chained with. CNIChainingMode = "cni-chaining-mode" @@ -652,6 +648,10 @@ const ( // PolicyMapEntriesName configures max entries for BPF policymap. PolicyMapEntriesName = "bpf-policy-map-max" + // PolicyMapFullReconciliationInterval sets the interval for performing the full + // reconciliation of the endpoint policy map. + PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval" + // SockRevNatEntriesName configures max entries for BPF sock reverse nat // entries. 
SockRevNatEntriesName = "bpf-sock-rev-map-max" @@ -664,10 +664,6 @@ const ( // load loggging LogSystemLoadConfigName = "log-system-load" - // PrependIptablesChainsName is the name of the option to enable - // prepending iptables chains instead of appending - PrependIptablesChainsName = "prepend-iptables-chains" - // DisableCiliumEndpointCRDName is the name of the option to disable // use of the CEP CRD DisableCiliumEndpointCRDName = "disable-endpoint-crd" @@ -753,10 +749,14 @@ const ( // be necessary on key rotations. EnableIPsecKeyWatcher = "enable-ipsec-key-watcher" + // Enable caching for XfrmState for IPSec. Significantly reduces CPU usage + // in large clusters. + EnableIPSecXfrmStateCaching = "enable-ipsec-xfrm-state-caching" + // IPSecKeyFileName is the name of the option for ipsec key file IPSecKeyFileName = "ipsec-key-file" - // EnableWireguard is the name of the option to enable wireguard + // EnableWireguard is the name of the option to enable WireGuard EnableWireguard = "enable-wireguard" // EnableL2Announcements is the name of the option to enable l2 announcements @@ -771,9 +771,23 @@ const ( // L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time. L2AnnouncerRetryPeriod = "l2-announcements-retry-period" - // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to wireguard userspace mode + // EnableEncryptionStrictMode is the name of the option to enable strict encryption mode. + EnableEncryptionStrictMode = "enable-encryption-strict-mode" + + // EncryptionStrictModeCIDR is the CIDR in which the strict ecryption mode should be enforced. + EncryptionStrictModeCIDR = "encryption-strict-mode-cidr" + + // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities. + // This is required when tunneling is used + // or direct routing is used and the node CIDR and pod CIDR overlap. 
+ EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities" + + // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to WireGuard userspace mode EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback" + // WireguardPersistentKeepalivee controls Wireguard PersistentKeepalive option. Set 0 to disable. + WireguardPersistentKeepalive = "wireguard-persistent-keepalive" + // NodeEncryptionOptOutLabels is the name of the option for the node-to-node encryption opt-out labels NodeEncryptionOptOutLabels = "node-encryption-opt-out-labels" @@ -811,6 +825,9 @@ const ( // EnableHealthCheckNodePort is the name of the EnableHealthCheckNodePort option EnableHealthCheckNodePort = "enable-health-check-nodeport" + // EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option + EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip" + // PolicyQueueSize is the size of the queues utilized by the policy // repository. PolicyQueueSize = "policy-queue-size" @@ -822,8 +839,13 @@ const ( // endpoints that are no longer alive and healthy. EndpointGCInterval = "endpoint-gc-interval" - // K8sEventHandover is the name of the K8sEventHandover option - K8sEventHandover = "enable-k8s-event-handover" + // This option turns off switching from full pods informer to node's local pods informer + // when CEP CRD is disabled and kvstore is used. + // Switching from full pods informer to node's local pods informer is considered default behaviour + // and this option allows us to change it back to having full pods informer all the time. + // It's meant to be mitigation only in case if endpoint synchronization from kvstore has some bugs + // and we actually need to watch all pods all the time. 
+ LegacyTurnOffK8sEventHandover = "legacy-turn-off-k8s-event-handover" // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 = "ipv4-service-loopback-address" @@ -857,9 +879,15 @@ const ( // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation" + // IPAMDefaultIPPool defines the default IP Pool when using multi-pool + IPAMDefaultIPPool = "ipam-default-ip-pool" + // XDPModeNative for loading progs with XDPModeLinkDriver XDPModeNative = "native" + // XDPModeBestEffort for loading progs with XDPModeLinkDriver + XDPModeBestEffort = "best-effort" + // XDPModeGeneric for loading progs with XDPModeLinkGeneric XDPModeGeneric = "testing-only" @@ -891,9 +919,9 @@ const ( // IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr" - // EgressMasqueradeInterfaces is the selector used to select interfaces - // subject to egress masquerading - EgressMasqueradeInterfaces = "egress-masquerade-interfaces" + // MasqueradeInterfaces is the selector used to select interfaces subject to + // egress masquerading + MasqueradeInterfaces = "egress-masquerade-interfaces" // PolicyTriggerInterval is the amount of time between triggers of policy // updates are invoked. @@ -911,10 +939,6 @@ const ( // identity allocation IdentityAllocationModeCRD = "crd" - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates = "disable-cnp-status-updates" - // EnableLocalNodeRoute controls installation of the route which points // the allocation prefix of the local node. EnableLocalNodeRoute = "enable-local-node-route" @@ -972,6 +996,10 @@ const ( // HubbleMetrics specifies enabled metrics and their configuration options. HubbleMetrics = "hubble-metrics" + // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. + // e.g. 
"/etc/cilium/flowlog.yaml" + HubbleFlowlogsConfigFilePath = "hubble-flowlogs-config-path" + // HubbleExportFilePath specifies the filepath to write Hubble events to. // e.g. "/var/run/cilium/hubble/events.log" HubbleExportFilePath = "hubble-export-file-path" @@ -986,6 +1014,15 @@ const ( // HubbleExportFileCompress specifies whether rotated files are compressed. HubbleExportFileCompress = "hubble-export-file-compress" + // HubbleExportAllowlist specifies allow list filter use by exporter. + HubbleExportAllowlist = "hubble-export-allowlist" + + // HubbleExportDenylist specifies deny list filter use by exporter. + HubbleExportDenylist = "hubble-export-denylist" + + // HubbleExportFieldmask specifies list of fields to log in exporter. + HubbleExportFieldmask = "hubble-export-fieldmask" + // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served EnableHubbleRecorderAPI = "enable-hubble-recorder-api" @@ -1006,9 +1043,23 @@ const ( // By default, Hubble observes all monitor events. 
HubbleMonitorEvents = "hubble-monitor-events" - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules = "disable-iptables-feeder-rules" + // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows + HubbleRedactEnabled = "hubble-redact-enabled" + + // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows + HubbleRedactHttpURLQuery = "hubble-redact-http-urlquery" + + // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo = "hubble-redact-http-userinfo" + + // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows + HubbleRedactKafkaApiKey = "hubble-redact-kafka-apikey" + + // HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows + HubbleRedactHttpHeadersAllow = "hubble-redact-http-headers-allow" + + // HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows + HubbleRedactHttpHeadersDeny = "hubble-redact-http-headers-deny" // K8sHeartbeatTimeout configures the timeout for apiserver heartbeat K8sHeartbeatTimeout = "k8s-heartbeat-timeout" @@ -1064,13 +1115,6 @@ const ( // LBMaglevMapMaxEntries configures max entries of bpf map for Maglev. LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max" - // K8sServiceProxyName instructs Cilium to handle service objects only when - // service.kubernetes.io/service-proxy-name label equals the provided value. - K8sServiceProxyName = "k8s-service-proxy-name" - - // APIRateLimitName enables configuration of the API rate limits - APIRateLimitName = "api-rate-limit" - // CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not // available. CRDWaitTimeout = "crd-wait-timeout" @@ -1094,6 +1138,9 @@ const ( // compatible with MetalLB's configuration. 
BGPConfigPath = "bgp-config-path" + // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from. + BGPSecretsNamespace = "bgp-secrets-namespace" + // ExternalClusterIPName is the name of the option to enable // cluster external access to ClusterIP services. ExternalClusterIPName = "bpf-lb-external-clusterip" @@ -1159,16 +1206,15 @@ const ( // and the max size and TTL of events in the buffers should be. BPFMapEventBuffers = "bpf-map-event-buffers" - // EnableStaleCiliumEndpointCleanup sets whether Cilium should perform cleanup of - // stale CiliumEndpoints during init. - EnableStaleCiliumEndpointCleanup = "enable-stale-cilium-endpoint-cleanup" - // IPAMCiliumnodeUpdateRate is the maximum rate at which the CiliumNode custom // resource is updated. IPAMCiliumNodeUpdateRate = "ipam-cilium-node-update-rate" // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = "enable-k8s-networkpolicy" + + // PolicyCIDRMatchMode defines the entities that CIDR selectors can reach + PolicyCIDRMatchMode = "policy-cidr-match-mode" ) // Default string arguments @@ -1180,18 +1226,6 @@ var ( MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"} ) -// Available option for DaemonConfig.Tunnel -const ( - // TunnelVXLAN specifies VXLAN encapsulation - TunnelVXLAN = "vxlan" - - // TunnelGeneve specifies Geneve encapsulation - TunnelGeneve = "geneve" - - // TunnelDisabled specifies to disable encapsulation - TunnelDisabled = "disabled" -) - // Available options for DaemonConfig.RoutingMode const ( // RoutingModeNative specifies native routing mode @@ -1234,6 +1268,12 @@ const ( // is considered timed out ProxyConnectTimeout = "proxy-connect-timeout" + // ProxyXffNumTrustedHopsIngress specifies the number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. 
+ ProxyXffNumTrustedHopsIngress = "proxy-xff-num-trusted-hops-ingress" + + // ProxyXffNumTrustedHopsEgress specifies the number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + ProxyXffNumTrustedHopsEgress = "proxy-xff-num-trusted-hops-egress" + // ProxyGID specifies the group ID that has access to unix domain sockets opened by Cilium // agent for proxy configuration and access logging. ProxyGID = "proxy-gid" @@ -1252,6 +1292,9 @@ const ( // CNIExclusive tells the agent to remove other CNI configuration files CNIExclusive = "cni-exclusive" + // CNIExternalRouting delegates endpoint routing to the chained CNI plugin. + CNIExternalRouting = "cni-external-routing" + // CNILogFile is the path to a log file (on the host) for the CNI plugin // binary to use for logging. CNILogFile = "cni-log-file" @@ -1309,6 +1352,9 @@ const ( // NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred) NodePortAccelerationNative = XDPModeNative + // NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support + NodePortAccelerationBestEffort = XDPModeBestEffort + // KubeProxyReplacementPartial specifies to enable only selected kube-proxy // replacement features (might panic) KubeProxyReplacementPartial = "partial" @@ -1337,11 +1383,6 @@ const ( PprofPortAgent = 6060 ) -// GetTunnelModes returns the list of all tunnel modes -func GetTunnelModes() string { - return fmt.Sprintf("%s, %s, %s", TunnelVXLAN, TunnelGeneve, TunnelDisabled) -} - // getEnvName returns the environment variable to be used for the given option name. func getEnvName(option string) string { under := strings.Replace(option, "-", "_", -1) @@ -1414,11 +1455,8 @@ type DaemonConfig struct { // devices. 
EnableRuntimeDeviceDetection bool - DatapathMode string // Datapath mode - Tunnel string // Tunnel mode - RoutingMode string // Routing mode - TunnelProtocol string // Tunneling protocol - TunnelPort int // Tunnel port + DatapathMode string // Datapath mode + RoutingMode string // Routing mode DryMode bool // Do not create BPF maps, devices, .. @@ -1519,6 +1557,10 @@ type DaemonConfig struct { CTMapEntriesTimeoutSYN time.Duration CTMapEntriesTimeoutFIN time.Duration + // MaxInternalTimerDelay sets a maximum on all periodic timers in + // the agent in order to flush out timer-related bugs in the agent. + MaxInternalTimerDelay time.Duration + // MonitorAggregationInterval configures the interval between monitor // messages when monitor aggregation is enabled. MonitorAggregationInterval time.Duration @@ -1547,6 +1589,10 @@ type DaemonConfig struct { // endpoint may allow traffic to exchange traffic with. PolicyMapEntries int + // PolicyMapFullReconciliationInterval is the interval at which to perform + // the full reconciliation of the endpoint policy map. + PolicyMapFullReconciliationInterval time.Duration + // SockRevNatEntries is the maximum number of sock rev nat mappings // allowed in the BPF rev nat table SockRevNatEntries int @@ -1558,10 +1604,6 @@ type DaemonConfig struct { // RunInterval. Zero means unlimited. MaxControllerInterval int - // UseSingleClusterRoute specifies whether to use a single cluster route - // instead of per-node routes. - UseSingleClusterRoute bool - // HTTPNormalizePath switches on Envoy HTTP path normalization options, which currently // includes RFC 3986 path normalization, Envoy merge slashes option, and unescaping and // redirecting for paths that contain escaped slashes. These are necessary to keep path based @@ -1598,6 +1640,12 @@ type DaemonConfig struct { // connection attempt to have timed out. 
ProxyConnectTimeout int + // ProxyXffNumTrustedHopsIngress defines the number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. + ProxyXffNumTrustedHopsIngress uint32 + + // ProxyXffNumTrustedHopsEgress defines the number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + ProxyXffNumTrustedHopsEgress uint32 + // ProxyGID specifies the group ID that has access to unix domain sockets opened by Cilium // agent for proxy configuration and access logging. ProxyGID int @@ -1615,24 +1663,16 @@ type DaemonConfig struct { // for the connection from proxy to upstream cluster ProxyIdleTimeout time.Duration + // RestoredProxyPortsAgeLimit specifies the time after which a restored proxy ports file is + // considered stale (in minutes) + RestoredProxyPortsAgeLimit uint + // EnvoyLogPath specifies where to store the Envoy proxy logs when Envoy // runs in the same container as Cilium. EnvoyLogPath string ProcFs string - // PrependIptablesChains is the name of the option to enable prepending - // iptables chains instead of appending - PrependIptablesChains bool - - // IPTablesLockTimeout defines the "-w" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesLockTimeout time.Duration - - // IPTablesRandomFully defines the "--random-fully" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesRandomFully bool - // K8sNamespace is the name of the namespace in which Cilium is // deployed in when running in Kubernetes mode K8sNamespace string @@ -1659,12 +1699,6 @@ type DaemonConfig struct { // EnableIPv6NDP is true when NDP is enabled for IPv6 EnableIPv6NDP bool - // EnableIPv6BIGTCP enables IPv6 BIG TCP (larger GSO/GRO limits) for the node including pods. 
- EnableIPv6BIGTCP bool - - // EnableIPv4BIGTCP enables IPv4 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv4BIGTCP bool - // EnableSRv6 is true when SRv6 encapsulation support is enabled EnableSRv6 bool @@ -1694,12 +1728,29 @@ type DaemonConfig struct { // be necessary on key rotations. EnableIPsecKeyWatcher bool + // EnableIPSecXfrmStateCaching enables IPSec XfrmState caching. + EnableIPSecXfrmStateCaching bool + // EnableWireguard enables Wireguard encryption EnableWireguard bool + // EnableEncryptionStrictMode enables strict mode for encryption + EnableEncryptionStrictMode bool + + // EncryptionStrictModeCIDR is the CIDR to use for strict mode + EncryptionStrictModeCIDR netip.Prefix + + // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities. + // This is required when tunneling is used + // or direct routing is used and the node CIDR and pod CIDR overlap. + EncryptionStrictModeAllowRemoteNodeIdentities bool + // EnableWireguardUserspaceFallback enables the fallback to the userspace implementation EnableWireguardUserspaceFallback bool + // WireguardPersistentKeepalive controls Wireguard PersistentKeepalive option. + WireguardPersistentKeepalive time.Duration + // EnableL2Announcements enables L2 announcement of service IPs EnableL2Announcements bool @@ -1752,6 +1803,7 @@ type DaemonConfig struct { KVStoreOpt map[string]string LabelPrefixFile string Labels []string + LegacyTurnOffK8sEventHandover bool LogDriver []string LogOpt map[string]string Logstash bool @@ -1759,29 +1811,31 @@ type DaemonConfig struct { // Masquerade specifies whether or not to masquerade packets from endpoints // leaving the host. 
- EnableIPv4Masquerade bool - EnableIPv6Masquerade bool - EnableBPFMasquerade bool - DeriveMasqIPAddrFromDevice string - EnableBPFClockProbe bool - EnableIPMasqAgent bool - EnableIPv4EgressGateway bool - EnableEnvoyConfig bool - EnableIngressController bool - EnableGatewayAPI bool - EnvoyConfigTimeout time.Duration - IPMasqAgentConfigPath string - InstallIptRules bool - MonitorAggregation string - PreAllocateMaps bool - IPv6NodeAddr string - IPv4NodeAddr string - SidecarIstioProxyImage string - SocketPath string - TracePayloadlen int - Version string - PrometheusServeAddr string - ToFQDNsMinTTL int + EnableIPv4Masquerade bool + EnableIPv6Masquerade bool + EnableBPFMasquerade bool + EnableMasqueradeRouteSource bool + EnableIPMasqAgent bool + DeriveMasqIPAddrFromDevice string + IPMasqAgentConfigPath string + + EnableBPFClockProbe bool + EnableIPv4EgressGateway bool + EnableEnvoyConfig bool + EnableIngressController bool + EnableGatewayAPI bool + EnvoyConfigTimeout time.Duration + InstallIptRules bool + MonitorAggregation string + PreAllocateMaps bool + IPv6NodeAddr string + IPv4NodeAddr string + SidecarIstioProxyImage string + SocketPath string + TracePayloadlen int + Version string + PrometheusServeAddr string + ToFQDNsMinTTL int // DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain // for each FQDN selector in endpoint's restored DNS rules @@ -1838,14 +1892,25 @@ type DaemonConfig struct { // been reached. DNSProxyConcurrencyProcessingGracePeriod time.Duration + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. + DNSProxyEnableTransparentMode bool + + // DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users + // to disable transparent mode even if IPSec is enabled + DNSProxyInsecureSkipTransparentModeCheck bool + // DNSProxyLockCount is the array size containing mutexes which protect - // against parallel handling of DNS response IPs. + // against parallel handling of DNS response names. 
DNSProxyLockCount int // DNSProxyLockTimeout is timeout when acquiring the locks controlled by // DNSProxyLockCount. DNSProxyLockTimeout time.Duration + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout int + // EnableXTSocketFallback allows disabling of kernel's ip_early_demux // sysctl option if `xt_socket` kernel module is not available. EnableXTSocketFallback bool @@ -1874,6 +1939,10 @@ type DaemonConfig struct { // cilium EnableHealthCheckNodePort bool + // EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP + // by cilium + EnableHealthCheckLoadBalancerIP bool + // KVstoreKeepAliveInterval is the interval in which the lease is being // renewed. This must be set to a value lesser than the LeaseTTL ideally // by a factor of 3. @@ -1884,7 +1953,7 @@ type DaemonConfig struct { // KVstoreMaxConsecutiveQuorumErrors is the maximum number of acceptable // kvstore consecutive quorum errors before the agent assumes permanent failure - KVstoreMaxConsecutiveQuorumErrors int + KVstoreMaxConsecutiveQuorumErrors uint // KVstorePeriodicSync is the time interval in which periodic // synchronization with the kvstore occurs @@ -1924,11 +1993,9 @@ type DaemonConfig struct { // interval ConntrackGCInterval time.Duration - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. - K8sEventHandover bool + // ConntrackGCMaxInterval if set limits the automatic GC interval calculation to + // the specified maximum value. 
+ ConntrackGCMaxInterval time.Duration // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 string @@ -1999,7 +2066,7 @@ type DaemonConfig struct { MaglevHashSeed string // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration string // NodePortBindProtection rejects bind requests to NodePort service ports @@ -2018,15 +2085,6 @@ type DaemonConfig struct { // considered local ones with HOST_ID in the ipcache AddressScopeMax int - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager bool - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR bool - - // ResetQueueMapping resets the Pod's skb queue mapping - ResetQueueMapping bool - // EnableRecorder enables the datapath pcap recorder EnableRecorder bool @@ -2048,10 +2106,6 @@ type DaemonConfig struct { // EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes EnableLocalRedirectPolicy bool - // K8sEnableEndpointSlice enables k8s endpoint slice feature that is used - // in kubernetes. 
- K8sEnableK8sEndpointSlice bool - // NodePortMin is the minimum port address for the NodePort range NodePortMin int @@ -2061,8 +2115,6 @@ type DaemonConfig struct { // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity bool - EnableServiceTopology bool - // Selection of BPF main clock source (ktime vs jiffies) ClockSource BPFClockSource @@ -2094,7 +2146,8 @@ type DaemonConfig struct { // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation map[string]string - + // IPAMDefaultIPPool the default IP Pool when using multi-pool + IPAMDefaultIPPool string // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node AutoCreateCiliumNodeResource bool @@ -2105,8 +2158,11 @@ type DaemonConfig struct { // IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable IPv6NativeRoutingCIDR *cidr.CIDR - // EgressMasqueradeInterfaces is the selector used to select interfaces - // subject to egress masquerading + // MasqueradeInterfaces is the selector used to select interfaces subject + // to egress masquerading. EgressMasqueradeInterfaces is the same but as + // a string representation. It's deprecated and can be removed once the GH + // issue https://github.com/cilium/cilium-cli/issues/1896 is fixed. + MasqueradeInterfaces []string EgressMasqueradeInterfaces string // PolicyTriggerInterval is the amount of time between when policy updates @@ -2117,10 +2173,6 @@ type DaemonConfig struct { // allocation IdentityAllocationMode string - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates bool - // AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in // the network policy for cilium-agent. AllowICMPFragNeeded bool @@ -2182,6 +2234,10 @@ type DaemonConfig struct { // HubbleMetrics specifies enabled metrics and their configuration options. 
HubbleMetrics []string + // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. + // e.g. "/etc/cilium/flowlog.yaml" + HubbleFlowlogsConfigFilePath string + // HubbleExportFilePath specifies the filepath to write Hubble events to. // e.g. "/var/run/cilium/hubble/events.log" HubbleExportFilePath string @@ -2196,6 +2252,15 @@ type DaemonConfig struct { // HubbleExportFileCompress specifies whether rotated files are compressed. HubbleExportFileCompress bool + // HubbleExportAllowlist specifies allow list filter use by exporter. + HubbleExportAllowlist []*flowpb.FlowFilter + + // HubbleExportDenylist specifies deny list filter use by exporter. + HubbleExportDenylist []*flowpb.FlowFilter + + // HubbleExportFieldmask specifies list of fields to log in exporter. + HubbleExportFieldmask []string + // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served EnableHubbleRecorderAPI bool @@ -2216,14 +2281,28 @@ type DaemonConfig struct { // By default, Hubble observes all monitor events. 
HubbleMonitorEvents []string + // HubbleRedactEnabled controls if Hubble will be redacting sensitive information from L7 flows + HubbleRedactEnabled bool + + // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows + HubbleRedactHttpURLQuery bool + + // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo bool + + // HubbleRedactKafkaApiKey controls if Kafka API key will be redacted from flows + HubbleRedactKafkaApiKey bool + + // HubbleRedactHttpHeadersAllow controls which http headers will not be redacted from flows + HubbleRedactHttpHeadersAllow []string + + // HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows + HubbleRedactHttpHeadersDeny []string + // EndpointStatus enables population of information in the // CiliumEndpoint.Status resource EndpointStatus map[string]struct{} - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules []string - // EnableIPv4FragmentsTracking enables IPv4 fragments tracking for // L4-based lookups. Needs LRU map support. EnableIPv4FragmentsTracking bool @@ -2276,16 +2355,6 @@ type DaemonConfig struct { // LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev. LBMaglevMapEntries int - // K8sServiceProxyName is the value of service.kubernetes.io/service-proxy-name label, - // that identifies the service objects Cilium should handle. - // If the provided value is an empty string, Cilium will manage service objects when - // the label is not present. For more details - - // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/2447-Make-kube-proxy-service-abstraction-optional - K8sServiceProxyName string - - // APIRateLimitName enables configuration of the API rate limits - APIRateLimit map[string]string - // CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not // available. 
CRDWaitTimeout time.Duration @@ -2298,6 +2367,10 @@ type DaemonConfig struct { // InstallNoConntrackIptRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic. InstallNoConntrackIptRules bool + // ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve + // the provided comma-separated list of ports in the container network namespace + ContainerIPLocalReservedPorts string + // EnableCustomCalls enables tail call hooks for user-defined custom // eBPF programs, typically used to collect custom per-endpoint // metrics. @@ -2313,6 +2386,9 @@ type DaemonConfig struct { // compatible with MetalLB's configuration. BGPConfigPath string + // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from. + BGPSecretsNamespace string + // ExternalClusterIP enables routing to ClusterIP services from outside // the cluster. This mirrors the behaviour of kube-proxy. ExternalClusterIP bool @@ -2377,67 +2453,84 @@ type DaemonConfig struct { BPFMapEventBuffersValidator func(val string) (string, error) `json:"-"` bpfMapEventConfigs BPFEventBufferConfigs - // EnableStaleCiliumEndpointCleanup enables cleanup routine during Cilium init. - // This will attempt to remove local CiliumEndpoints that are not managed by Cilium - // following Endpoint restoration. - EnableStaleCiliumEndpointCleanup bool - // IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom // resource is updated. IPAMCiliumNodeUpdateRate time.Duration // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy bool + + // PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy. + // Currently supported values: + // - world + // - world, remote-node + PolicyCIDRMatchMode []string + + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. 
+ // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters uint32 + + // ForceDeviceRequired enforces the attachment of BPF programs on native device. + ForceDeviceRequired bool + + // ServiceNoBackendResponse determines how we handle traffic to a service with no backends. + ServiceNoBackendResponse string } var ( // Config represents the daemon configuration Config = &DaemonConfig{ - CreationTime: time.Now(), - Opts: NewIntOptions(&DaemonOptionLibrary), - Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0}, - IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR, - IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase, - EnableHostIPRestore: defaults.EnableHostIPRestore, - EnableHealthChecking: defaults.EnableHealthChecking, - EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking, - EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort, - EnableIPv4: defaults.EnableIPv4, - EnableIPv6: defaults.EnableIPv6, - EnableIPv6NDP: defaults.EnableIPv6NDP, - EnableSCTP: defaults.EnableSCTP, - EnableL7Proxy: defaults.EnableL7Proxy, - EndpointStatus: make(map[string]struct{}), - DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule, - ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost, - KVstorePeriodicSync: defaults.KVstorePeriodicSync, - KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout, - IPAllocationTimeout: defaults.IPAllocationTimeout, - IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod, - IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriod, - FixedIdentityMapping: make(map[string]string), - KVStoreOpt: make(map[string]string), - LogOpt: make(map[string]string), - LoopbackIPv4: defaults.LoopbackIPv4, - EnableEndpointRoutes: defaults.EnableEndpointRoutes, - AnnotateK8sNode: 
defaults.AnnotateK8sNode, - K8sServiceCacheSize: defaults.K8sServiceCacheSize, - AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource, - IdentityAllocationMode: IdentityAllocationModeKVstore, - AllowICMPFragNeeded: defaults.AllowICMPFragNeeded, - EnableWellKnownIdentities: defaults.EnableWellKnownIdentities, - K8sEnableK8sEndpointSlice: defaults.K8sEnableEndpointSlice, - AllocatorListTimeout: defaults.AllocatorListTimeout, - EnableICMPRules: defaults.EnableICMPRules, - UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec, + CreationTime: time.Now(), + Opts: NewIntOptions(&DaemonOptionLibrary), + Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0}, + IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR, + IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase, + IPAMDefaultIPPool: defaults.IPAMDefaultIPPool, + EnableHostIPRestore: defaults.EnableHostIPRestore, + EnableHealthChecking: defaults.EnableHealthChecking, + EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking, + EnableHealthCheckLoadBalancerIP: defaults.EnableHealthCheckLoadBalancerIP, + EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort, + EnableIPv4: defaults.EnableIPv4, + EnableIPv6: defaults.EnableIPv6, + EnableIPv6NDP: defaults.EnableIPv6NDP, + EnableSCTP: defaults.EnableSCTP, + EnableL7Proxy: defaults.EnableL7Proxy, + EndpointStatus: make(map[string]struct{}), + DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule, + ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost, + KVstorePeriodicSync: defaults.KVstorePeriodicSync, + KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout, + IPAllocationTimeout: defaults.IPAllocationTimeout, + IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod, + IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriod, + FixedIdentityMapping: make(map[string]string), + KVStoreOpt: make(map[string]string), + 
LogOpt: make(map[string]string), + LoopbackIPv4: defaults.LoopbackIPv4, + EnableEndpointRoutes: defaults.EnableEndpointRoutes, + AnnotateK8sNode: defaults.AnnotateK8sNode, + K8sServiceCacheSize: defaults.K8sServiceCacheSize, + AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource, + IdentityAllocationMode: IdentityAllocationModeKVstore, + AllowICMPFragNeeded: defaults.AllowICMPFragNeeded, + EnableWellKnownIdentities: defaults.EnableWellKnownIdentities, + AllocatorListTimeout: defaults.AllocatorListTimeout, + EnableICMPRules: defaults.EnableICMPRules, + UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec, K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery, - APIRateLimit: make(map[string]string), ExternalClusterIP: defaults.ExternalClusterIP, EnableVTEP: defaults.EnableVTEP, EnableBGPControlPlane: defaults.EnableBGPControlPlane, EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, + PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode, + MaxConnectedClusters: defaults.MaxConnectedClusters, + EnableEnvoyConfig: defaults.EnableEnvoyConfig, } ) @@ -2542,31 +2635,11 @@ func (c *DaemonConfig) TunnelingEnabled() bool { return c.RoutingMode != RoutingModeNative } -// TunnelDevice returns cilium_{vxlan,geneve} depending on the config or "" if disabled. -func (c *DaemonConfig) TunnelDevice() string { - if c.TunnelingEnabled() { - return fmt.Sprintf("cilium_%s", c.TunnelProtocol) - } else { - return "" - } -} - -// TunnelExists returns true if some traffic may go through a tunnel, including -// if the primary mode is native routing. For example, in the egress gateway, -// we may send such traffic to a gateway node via a tunnel. -// In conjunction with the DSR Geneve and the direct routing, traffic from -// intermediate nodes to backend pods go through a tunnel, but the datapath logic -// takes care of the MTU overhead. So no need to take it into account here. 
-// See encap_geneve_dsr_opt[4,6] in nodeport.h -func (c *DaemonConfig) TunnelExists() bool { - return c.TunnelingEnabled() || c.EnableIPv4EgressGateway || c.EnableHighScaleIPcache -} - // AreDevicesRequired returns true if the agent needs to attach to the native // devices to implement some features. func (c *DaemonConfig) AreDevicesRequired() bool { - return c.EnableNodePort || c.EnableHostFirewall || c.EnableBandwidthManager || - c.EnableWireguard || c.EnableHighScaleIPcache || c.EnableL2Announcements + return c.EnableNodePort || c.EnableHostFirewall || c.EnableWireguard || + c.EnableHighScaleIPcache || c.EnableL2Announcements || c.ForceDeviceRequired } // MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled. @@ -2662,23 +2735,6 @@ func (c *DaemonConfig) EndpointStatusIsEnabled(option string) bool { return ok } -// LocalClusterName returns the name of the cluster Cilium is deployed in -func (c *DaemonConfig) LocalClusterName() string { - return c.ClusterName -} - -// LocalClusterID returns the ID of the cluster local to the Cilium agent. -func (c *DaemonConfig) LocalClusterID() uint32 { - return c.ClusterID -} - -// K8sServiceProxyName returns the required value for the -// service.kubernetes.io/service-proxy-name label in order for services to be -// handled. 
-func (c *DaemonConfig) K8sServiceProxyNameValue() string { - return c.K8sServiceProxyName -} - // CiliumNamespaceName returns the name of the namespace in which Cilium is // deployed in func (c *DaemonConfig) CiliumNamespaceName() string { @@ -2700,6 +2756,11 @@ func (c *DaemonConfig) K8sNetworkPolicyEnabled() bool { return c.EnableK8sNetworkPolicy } +// K8sEnvoyConfigEnabled returns true if CiliumEnvoyConfig feature is enabled in Cilium +func (c *DaemonConfig) K8sEnvoyConfigEnabled() bool { + return c.EnableEnvoyConfig +} + // K8sIngressControllerEnabled returns true if ingress controller feature is enabled in Cilium func (c *DaemonConfig) K8sIngressControllerEnabled() bool { return c.EnableIngressController @@ -2710,6 +2771,28 @@ func (c *DaemonConfig) K8sGatewayAPIEnabled() bool { return c.EnableGatewayAPI } +func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool { + for _, mode := range c.PolicyCIDRMatchMode { + if mode == "nodes" { + return true + } + } + return false +} + +func (c *DaemonConfig) validatePolicyCIDRMatchMode() error { + // Currently, the only acceptable value is "nodes". + for _, mode := range c.PolicyCIDRMatchMode { + switch mode { + case "nodes": + continue + default: + return fmt.Errorf("unknown CIDR match mode: %s", mode) + } + } + return nil +} + // DirectRoutingDeviceRequired return whether the Direct Routing Device is needed under // the current configuration. 
func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { @@ -2722,7 +2805,12 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { return true } - return (c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard) && !c.TunnelingEnabled() + return c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard +} + +func (c *DaemonConfig) LoadBalancerUsesDSR() bool { + return c.NodePortMode == NodePortModeDSR || + c.NodePortMode == NodePortModeHybrid } func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error { @@ -2753,18 +2841,41 @@ func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error { return nil } +func (c *DaemonConfig) validateHubbleRedact() error { + if len(c.HubbleRedactHttpHeadersAllow) > 0 && len(c.HubbleRedactHttpHeadersDeny) > 0 { + return fmt.Errorf("Only one of --hubble-redact-http-headers-allow and --hubble-redact-http-headers-deny can be specified, not both") + } + return nil +} + +func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error { + if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto { + return nil + } + + if regexp.MustCompile(`^(\d+(-\d+)?)(,\d+(-\d+)?)*$`).MatchString(c.ContainerIPLocalReservedPorts) { + return nil + } + + return fmt.Errorf("Invalid comma separated list of of ranges for %s option", ContainerIPLocalReservedPorts) +} + // Validate validates the daemon configuration func (c *DaemonConfig) Validate(vp *viper.Viper) error { if err := c.validateIPv6ClusterAllocCIDR(); err != nil { - return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %s", + return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %w", c.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRName, err) } if err := c.validateIPv6NAT46x64CIDR(); err != nil { - return fmt.Errorf("unable to parse internal CIDR value '%s': %s", + return fmt.Errorf("unable to parse internal CIDR value '%s': %w", c.IPv6NAT46x64CIDR, err) } + if err := 
c.validateHubbleRedact(); err != nil { + return err + } + if c.MTU < 0 { return fmt.Errorf("MTU '%d' cannot be negative", c.MTU) } @@ -2793,27 +2904,16 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { c.RoutingMode, RoutingModeTunnel, RoutingModeNative) } - switch c.TunnelProtocol { - case TunnelVXLAN, TunnelGeneve: - default: - return fmt.Errorf("invalid tunnel protocol %q", c.TunnelProtocol) - } - - if c.RoutingMode == RoutingModeNative && c.UseSingleClusterRoute { - return fmt.Errorf("option --%s cannot be used in combination with --%s=%s", - SingleClusterRouteName, RoutingMode, RoutingModeNative) + cinfo := clustermeshTypes.ClusterInfo{ + ID: c.ClusterID, + Name: c.ClusterName, + MaxConnectedClusters: c.MaxConnectedClusters, } - - if c.ClusterID < clustermeshTypes.ClusterIDMin || c.ClusterID > clustermeshTypes.ClusterIDMax { - return fmt.Errorf("invalid cluster id %d: must be in range %d..%d", - c.ClusterID, clustermeshTypes.ClusterIDMin, clustermeshTypes.ClusterIDMax) + if err := cinfo.InitClusterIDMax(); err != nil { + return err } - - if c.ClusterID != 0 { - if c.ClusterName == defaults.ClusterName { - return fmt.Errorf("cannot use default cluster name (%s) with option %s", - defaults.ClusterName, ClusterIDName) - } + if err := cinfo.Validate(); err != nil { + return err } if err := c.checkMapSizeLimits(); err != nil { @@ -2853,6 +2953,14 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { } } + if err := c.validatePolicyCIDRMatchMode(); err != nil { + return err + } + + if err := c.validateContainerIPLocalReservedPorts(); err != nil { + return err + } + return nil } @@ -2862,7 +2970,7 @@ func ReadDirConfig(dirName string) (map[string]interface{}, error) { m := map[string]interface{}{} files, err := os.ReadDir(dirName) if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("unable to read configuration directory: %s", err) + return nil, fmt.Errorf("unable to read configuration directory: %w", err) } for _, f := range files { if 
f.IsDir() { @@ -2903,7 +3011,7 @@ func ReadDirConfig(dirName string) (map[string]interface{}, error) { func MergeConfig(vp *viper.Viper, m map[string]interface{}) error { err := vp.MergeConfigMap(m) if err != nil { - return fmt.Errorf("unable to read merge directory configuration: %s", err) + return fmt.Errorf("unable to read merge directory configuration: %w", err) } return nil } @@ -2935,7 +3043,7 @@ func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error { for _, ipString := range s { _, ipnet, err := net.ParseCIDR(ipString) if err != nil { - return fmt.Errorf("unable to parse excluded local address %s: %s", ipString, err) + return fmt.Errorf("unable to parse excluded local address %s: %w", ipString, err) } c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, ipnet) @@ -2960,18 +3068,16 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource) c.BPFRoot = vp.GetString(BPFRoot) c.CGroupRoot = vp.GetString(CGroupRoot) - c.ClusterID = vp.GetUint32(ClusterIDName) - c.ClusterName = vp.GetString(ClusterName) + c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID) + c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName) + c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters) c.DatapathMode = vp.GetString(DatapathMode) c.Debug = vp.GetBool(DebugArg) c.DebugVerbose = vp.GetStringSlice(DebugVerbose) c.DirectRoutingDevice = vp.GetString(DirectRoutingDevice) - c.LBDevInheritIPAddr = vp.GetString(LBDevInheritIPAddr) c.EnableIPv4 = vp.GetBool(EnableIPv4Name) c.EnableIPv6 = vp.GetBool(EnableIPv6Name) c.EnableIPv6NDP = vp.GetBool(EnableIPv6NDPName) - c.EnableIPv6BIGTCP = vp.GetBool(EnableIPv6BIGTCP) - c.EnableIPv4BIGTCP = vp.GetBool(EnableIPv4BIGTCP) c.EnableSRv6 = vp.GetBool(EnableSRv6) c.SRv6EncapMode = vp.GetString(SRv6EncapModeName) c.EnableSCTP = vp.GetBool(EnableSCTPName) @@ -2983,10 +3089,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { 
c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline) c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod) c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback) + c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive) c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities) c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter) c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName) - c.EgressMasqueradeInterfaces = vp.GetString(EgressMasqueradeInterfaces) + c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces) + c.EgressMasqueradeInterfaces = strings.Join(c.MasqueradeInterfaces, ",") c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly) c.EnableSocketLB = vp.GetBool(EnableSocketLB) c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing) @@ -2998,6 +3106,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableHealthChecking = vp.GetBool(EnableHealthChecking) c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking) c.EnableHealthCheckNodePort = vp.GetBool(EnableHealthCheckNodePort) + c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP) c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute) c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy)) c.EnableExternalIPs = vp.GetBool(EnableExternalIPs) @@ -3014,9 +3123,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange) c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement) c.EnableSessionAffinity = vp.GetBool(EnableSessionAffinity) - c.EnableServiceTopology = vp.GetBool(EnableServiceTopology) - c.EnableBandwidthManager = vp.GetBool(EnableBandwidthManager) - c.EnableBBR = vp.GetBool(EnableBBR) c.EnableRecorder = vp.GetBool(EnableRecorder) c.EnableMKE = vp.GetBool(EnableMKE) c.CgroupPathMKE = vp.GetString(CgroupPathMKE) @@ -3034,6 +3140,7 @@ func (c 
*DaemonConfig) Populate(vp *viper.Viper) { c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod) c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod) c.IPAM = vp.GetString(IPAM) + c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool) c.IPv4Range = vp.GetString(IPv4Range) c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr) c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange) @@ -3042,11 +3149,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IPv6Range = vp.GetString(IPv6Range) c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange) c.JoinCluster = vp.GetBool(JoinClusterName) - c.K8sEnableK8sEndpointSlice = vp.GetBool(K8sEnableEndpointSlice) c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName) c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName) c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize)) - c.K8sEventHandover = vp.GetBool(K8sEventHandover) + c.LegacyTurnOffK8sEventHandover = vp.GetBool(LegacyTurnOffK8sEventHandover) c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName) c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName) c.K8sWatcherEndpointSelector = vp.GetString(K8sWatcherEndpointSelector) @@ -3056,7 +3162,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor c.KVstorePeriodicSync = vp.GetDuration(KVstorePeriodicSync) c.KVstoreConnectivityTimeout = vp.GetDuration(KVstoreConnectivityTimeout) - c.KVstoreMaxConsecutiveQuorumErrors = vp.GetInt(KVstoreMaxConsecutiveQuorumErrorsName) + c.KVstoreMaxConsecutiveQuorumErrors = vp.GetUint(KVstoreMaxConsecutiveQuorumErrorsName) c.IPAllocationTimeout = vp.GetDuration(IPAllocationTimeout) c.LabelPrefixFile = vp.GetString(LabelPrefixFile) c.Labels = vp.GetStringSlice(Labels) @@ -3076,29 +3182,29 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnvoyConfigTimeout = vp.GetDuration(EnvoyConfigTimeout) c.IPMasqAgentConfigPath = 
vp.GetString(IPMasqAgentConfigPath) c.InstallIptRules = vp.GetBool(InstallIptRules) - c.IPTablesLockTimeout = vp.GetDuration(IPTablesLockTimeout) - c.IPTablesRandomFully = vp.GetBool(IPTablesRandomFully) c.IPSecKeyFile = vp.GetString(IPSecKeyFileName) c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration) c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher) + c.EnableIPSecXfrmStateCaching = vp.GetBool(EnableIPSecXfrmStateCaching) c.MonitorAggregation = vp.GetString(MonitorAggregationName) c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval) c.MTU = vp.GetInt(MTUName) c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName) - c.PrependIptablesChains = vp.GetBool(PrependIptablesChainsName) c.ProcFs = vp.GetString(ProcFs) c.ProxyConnectTimeout = vp.GetInt(ProxyConnectTimeout) + c.ProxyXffNumTrustedHopsIngress = vp.GetUint32(ProxyXffNumTrustedHopsIngress) + c.ProxyXffNumTrustedHopsEgress = vp.GetUint32(ProxyXffNumTrustedHopsEgress) c.ProxyGID = vp.GetInt(ProxyGID) c.ProxyPrometheusPort = vp.GetInt(ProxyPrometheusPort) c.ProxyMaxRequestsPerConnection = vp.GetInt(ProxyMaxRequestsPerConnection) c.ProxyMaxConnectionDuration = time.Duration(vp.GetInt64(ProxyMaxConnectionDuration)) c.ProxyIdleTimeout = time.Duration(vp.GetInt64(ProxyIdleTimeout)) + c.RestoredProxyPortsAgeLimit = vp.GetUint(RestoredProxyPortsAgeLimit) c.RestoreState = vp.GetBool(Restore) c.RouteMetric = vp.GetInt(RouteMetric) c.RunDir = vp.GetString(StateDir) c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy) c.SidecarIstioProxyImage = vp.GetString(SidecarIstioProxyImage) - c.UseSingleClusterRoute = vp.GetBool(SingleClusterRouteName) c.SocketPath = vp.GetString(SocketPath) c.TracePayloadlen = vp.GetInt(TracePayloadlen) c.Version = vp.GetString(Version) @@ -3113,28 +3219,39 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg) c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName) 
c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName) - c.K8sServiceProxyName = vp.GetString(K8sServiceProxyName) c.CRDWaitTimeout = vp.GetDuration(CRDWaitTimeout) c.LoadBalancerDSRDispatch = vp.GetString(LoadBalancerDSRDispatch) c.LoadBalancerDSRL4Xlate = vp.GetString(LoadBalancerDSRL4Xlate) c.LoadBalancerRSSv4CIDR = vp.GetString(LoadBalancerRSSv4CIDR) c.LoadBalancerRSSv6CIDR = vp.GetString(LoadBalancerRSSv6CIDR) c.InstallNoConntrackIptRules = vp.GetBool(InstallNoConntrackIptRules) + c.ContainerIPLocalReservedPorts = vp.GetString(ContainerIPLocalReservedPorts) c.EnableCustomCalls = vp.GetBool(EnableCustomCallsName) c.BGPAnnounceLBIP = vp.GetBool(BGPAnnounceLBIP) c.BGPAnnouncePodCIDR = vp.GetBool(BGPAnnouncePodCIDR) c.BGPConfigPath = vp.GetString(BGPConfigPath) + c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace) c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName) c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway) c.EnableHighScaleIPcache = vp.GetBool(EnableHighScaleIPcache) c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4 c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6 c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade) + c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource) c.DeriveMasqIPAddrFromDevice = vp.GetString(DeriveMasqIPAddrFromDevice) c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery) c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate) + c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse) + switch c.ServiceNoBackendResponse { + case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop: + case "": + c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse + default: + log.Fatalf("Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse) + } + c.populateLoadBalancerSettings(vp) c.populateDevices(vp) 
c.EnableRuntimeDeviceDetection = vp.GetBool(EnableRuntimeDeviceDetection) @@ -3156,38 +3273,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.TCFilterPriority = uint16(tcFilterPrio) - c.Tunnel = vp.GetString(TunnelName) c.RoutingMode = vp.GetString(RoutingMode) - c.TunnelProtocol = vp.GetString(TunnelProtocol) - c.TunnelPort = vp.GetInt(TunnelPortName) - - if c.Tunnel != "" && c.RoutingMode != defaults.RoutingMode { - log.Fatalf("Option --%s cannot be used in combination with --%s", RoutingMode, TunnelName) - } - - if c.Tunnel == "disabled" { - c.RoutingMode = RoutingModeNative - } else if c.Tunnel != "" { - c.TunnelProtocol = c.Tunnel - } - c.Tunnel = "" - - if c.TunnelPort == 0 { - // manually pick port for native-routing and DSR with Geneve dispatch: - if !c.TunnelingEnabled() && - (c.EnableNodePort || (c.KubeProxyReplacement == KubeProxyReplacementStrict || c.KubeProxyReplacement == KubeProxyReplacementTrue)) && - c.NodePortMode != NodePortModeSNAT && - c.LoadBalancerDSRDispatch == DSRDispatchGeneve { - c.TunnelPort = defaults.TunnelPortGeneve - } else { - switch c.TunnelProtocol { - case TunnelVXLAN: - c.TunnelPort = defaults.TunnelPortVXLAN - case TunnelGeneve: - c.TunnelPort = defaults.TunnelPortGeneve - } - } - } if vp.IsSet(AddressScopeMax) { c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax)) @@ -3205,6 +3291,26 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } + encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode) + if encryptionStrictModeEnabled { + if c.EnableIPv6 { + log.Warnf("WireGuard encryption strict mode only support IPv4. 
IPv6 traffic is not protected and can be leaked.") + } + + strictCIDR := vp.GetString(EncryptionStrictModeCIDR) + c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR) + if err != nil { + log.WithError(err).Fatalf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR) + } + + if !c.EncryptionStrictModeCIDR.Addr().Is4() { + log.Fatalf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR) + } + + c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities) + c.EnableEncryptionStrictMode = encryptionStrictModeEnabled + } + ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR) if ipv4NativeRoutingCIDR != "" { @@ -3218,11 +3324,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } - if c.EnableIPv4 && ipv4NativeRoutingCIDR == "" && c.EnableAutoDirectRouting { - log.Warnf("If %s is enabled, then you are recommended to also configure %s. If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+ - "which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv4NativeRoutingCIDR, IPv4NativeRoutingCIDR) - } - ipv6NativeRoutingCIDR := vp.GetString(IPv6NativeRoutingCIDR) if ipv6NativeRoutingCIDR != "" { @@ -3236,11 +3337,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } - if c.EnableIPv6 && ipv6NativeRoutingCIDR == "" && c.EnableAutoDirectRouting { - log.Warnf("If %s is enabled, then you are recommended to also configure %s. 
If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+ - "which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv6NativeRoutingCIDR, IPv6NativeRoutingCIDR) - } - if err := c.calculateBPFMapSizes(vp); err != nil { log.Fatal(err) } @@ -3272,8 +3368,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay) c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit) c.DNSProxyConcurrencyProcessingGracePeriod = vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod) + c.DNSProxyEnableTransparentMode = vp.GetBool(DNSProxyEnableTransparentMode) + c.DNSProxyInsecureSkipTransparentModeCheck = vp.GetBool(DNSProxyInsecureSkipTransparentModeCheck) c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount) c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout) + c.DNSProxySocketLingerTimeout = vp.GetInt(DNSProxySocketLingerTimeout) + c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode) // Convert IP strings into net.IPNet types subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets)) @@ -3322,6 +3422,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval) + c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval) if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil { log.Fatalf("unable to parse %s: %s", KVStoreOpt, err) @@ -3335,12 +3436,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.LogOpt = m } - if m, err := command.GetStringMapStringE(vp, APIRateLimitName); err != nil { - log.Fatalf("unable to parse %s: %s", APIRateLimitName, err) - } else { - c.APIRateLimit = m - } - c.bpfMapEventConfigs = make(BPFEventBufferConfigs) parseBPFMapEventConfigs(c.bpfMapEventConfigs, defaults.BPFEventBufferConfigs) if m, err := command.GetStringMapStringE(vp, BPFMapEventBuffers); err != nil { @@ -3390,14 +3485,10 @@ func (c 
*DaemonConfig) Populate(vp *viper.Viper) { log.Warningf("Running Cilium with %q=%q requires endpoint CRDs. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false) c.DisableCiliumEndpointCRD = false } - if c.K8sEventHandover { - log.Warningf("Running Cilium with %q=%q requires KVStore capability. Changing %s to %t", KVStore, c.KVStore, K8sEventHandover, false) - c.K8sEventHandover = false - } } switch c.IPAM { - case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2: + case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool: if c.EnableIPv4 { c.K8sRequireIPv4PodCIDR = true } @@ -3411,7 +3502,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } else { c.IPAMMultiPoolPreAllocation = m } - + if len(c.IPAMMultiPoolPreAllocation) == 0 { + // Default to the same value as IPAMDefaultIPPool + c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"} + } c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr) // Hubble options. 
@@ -3435,13 +3529,52 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.HubbleExportFileMaxSizeMB = vp.GetInt(HubbleExportFileMaxSizeMB) c.HubbleExportFileMaxBackups = vp.GetInt(HubbleExportFileMaxBackups) c.HubbleExportFileCompress = vp.GetBool(HubbleExportFileCompress) + + for _, enc := range vp.GetStringSlice(HubbleExportAllowlist) { + dec := json.NewDecoder(strings.NewReader(enc)) + var result flowpb.FlowFilter + if err := dec.Decode(&result); err != nil { + if errors.Is(err, io.EOF) { + break + } + log.Fatalf("failed to decode hubble-export-allowlist '%v': %s", enc, err) + } + c.HubbleExportAllowlist = append(c.HubbleExportAllowlist, &result) + } + + for _, enc := range vp.GetStringSlice(HubbleExportDenylist) { + dec := json.NewDecoder(strings.NewReader(enc)) + var result flowpb.FlowFilter + if err := dec.Decode(&result); err != nil { + if errors.Is(err, io.EOF) { + break + } + log.Fatalf("failed to decode hubble-export-denylist '%v': %s", enc, err) + } + c.HubbleExportDenylist = append(c.HubbleExportDenylist, &result) + } + + if fm := vp.GetStringSlice(HubbleExportFieldmask); len(fm) > 0 { + _, err := fieldmaskpb.New(&flowpb.Flow{}, fm...) 
+ if err != nil { + log.Fatalf("hubble-export-fieldmask contains invalid fieldmask '%v': %s", fm, err) + } + c.HubbleExportFieldmask = vp.GetStringSlice(HubbleExportFieldmask) + } + + c.HubbleFlowlogsConfigFilePath = vp.GetString(HubbleFlowlogsConfigFilePath) + c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI) c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath) c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize) c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs) c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents) - - c.DisableIptablesFeederRules = vp.GetStringSlice(DisableIptablesFeederRules) + c.HubbleRedactEnabled = vp.GetBool(HubbleRedactEnabled) + c.HubbleRedactHttpURLQuery = vp.GetBool(HubbleRedactHttpURLQuery) + c.HubbleRedactHttpUserInfo = vp.GetBool(HubbleRedactHttpUserInfo) + c.HubbleRedactKafkaApiKey = vp.GetBool(HubbleRedactKafkaApiKey) + c.HubbleRedactHttpHeadersAllow = vp.GetStringSlice(HubbleRedactHttpHeadersAllow) + c.HubbleRedactHttpHeadersDeny = vp.GetStringSlice(HubbleRedactHttpHeadersDeny) // Hidden options c.CompilerFlags = vp.GetStringSlice(CompilerFlags) @@ -3452,12 +3585,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.MaxControllerInterval = vp.GetInt(MaxCtrlIntervalName) c.PolicyQueueSize = sanitizeIntParam(vp, PolicyQueueSize, defaults.PolicyQueueSize) c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize) - c.DisableCNPStatusUpdates = vp.GetBool(DisableCNPStatusUpdates) c.EnableICMPRules = vp.GetBool(EnableICMPRules) c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec) c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore) c.EnableK8sTerminatingEndpoint = vp.GetBool(EnableK8sTerminatingEndpoint) - c.EnableStaleCiliumEndpointCleanup = vp.GetBool(EnableStaleCiliumEndpointCleanup) // Disable Envoy version check if L7 proxy is disabled. 
c.DisableEnvoyVersionCheck = vp.GetBool(DisableEnvoyVersionCheck) @@ -3484,6 +3615,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { // To support K8s NetworkPolicy c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy) + c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode) } func (c *DaemonConfig) populateDevices(vp *viper.Viper) { @@ -3548,11 +3680,11 @@ func (c *DaemonConfig) populateNodePortRange(vp *viper.Viper) error { c.NodePortMin, err = strconv.Atoi(nodePortRange[0]) if err != nil { - return fmt.Errorf("Unable to parse min port value for NodePort range: %s", err.Error()) + return fmt.Errorf("Unable to parse min port value for NodePort range: %w", err) } c.NodePortMax, err = strconv.Atoi(nodePortRange[1]) if err != nil { - return fmt.Errorf("Unable to parse max port value for NodePort range: %s", err.Error()) + return fmt.Errorf("Unable to parse max port value for NodePort range: %w", err) } if c.NodePortMax <= c.NodePortMin { return errors.New("NodePort range min port must be smaller than max port") @@ -3654,29 +3786,52 @@ func (c *DaemonConfig) checkMapSizeLimits() error { } func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error { - if c.GetIPv4NativeRoutingCIDR() == nil && c.EnableIPv4Masquerade && !c.TunnelingEnabled() && - c.IPAMMode() != ipamOption.IPAMENI && c.EnableIPv4 && c.IPAMMode() != ipamOption.IPAMAlibabaCloud { - return fmt.Errorf( - "native routing cidr must be configured with option --%s "+ - "in combination with --%s --%s=%s --%s=%s --%s=true", - IPv4NativeRoutingCIDR, EnableIPv4Masquerade, RoutingMode, RoutingModeNative, - IPAM, c.IPAMMode(), EnableIPv4Name) + if c.GetIPv4NativeRoutingCIDR() != nil { + return nil + } + if !c.EnableIPv4 || !c.EnableIPv4Masquerade { + return nil + } + if c.EnableIPMasqAgent { + return nil + } + if c.TunnelingEnabled() { + return nil + } + if c.IPAMMode() == ipamOption.IPAMENI || c.IPAMMode() == ipamOption.IPAMAlibabaCloud { + return nil } - return nil + return fmt.Errorf( + "native 
routing cidr must be configured with option --%s "+ + "in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s", + IPv4NativeRoutingCIDR, + EnableIPv4Name, EnableIPv4Masquerade, + EnableIPMasqAgent, + RoutingMode, RoutingModeNative, + IPAM, c.IPAMMode()) } func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error { - if c.GetIPv6NativeRoutingCIDR() == nil && c.EnableIPv6Masquerade && !c.TunnelingEnabled() && - c.EnableIPv6 { - return fmt.Errorf( - "native routing cidr must be configured with option --%s "+ - "in combination with --%s --%s=%s --%s=true", - IPv6NativeRoutingCIDR, EnableIPv6Masquerade, RoutingMode, RoutingModeNative, - EnableIPv6Name) + if c.GetIPv6NativeRoutingCIDR() != nil { + return nil } - - return nil + if !c.EnableIPv6 || !c.EnableIPv6Masquerade { + return nil + } + if c.EnableIPMasqAgent { + return nil + } + if c.TunnelingEnabled() { + return nil + } + return fmt.Errorf( + "native routing cidr must be configured with option --%s "+ + "in combination with --%s=true --%s=true --%s=false --%s=%s", + IPv6NativeRoutingCIDR, + EnableIPv6Name, EnableIPv6Masquerade, + EnableIPMasqAgent, + RoutingMode, RoutingModeNative) } func (c *DaemonConfig) checkIPAMDelegatedPlugin() error { @@ -3716,6 +3871,7 @@ func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error { c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName) c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName) c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName) + c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName) c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName) c.LBMapEntries = vp.GetInt(LBMapEntriesName) c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries) @@ -3943,6 +4099,16 @@ func (c *DaemonConfig) BGPControlPlaneEnabled() bool { return c.EnableBGPControlPlane } +func (c *DaemonConfig) IsDualStack() bool { + return c.EnableIPv4 && c.EnableIPv6 +} + +// IsLocalRouterIP checks if provided IP address 
matches either LocalRouterIPv4 +// or LocalRouterIPv6 +func (c *DaemonConfig) IsLocalRouterIP(ip string) bool { + return ip != "" && (c.LocalRouterIPv4 == ip || c.LocalRouterIPv6 == ip) +} + // StoreViperInFile stores viper's configuration in a the given directory under // the file name 'viper-config.yaml'. If this file already exists, it is renamed // to 'viper-config-1.yaml', if 'viper-config-1.yaml' also exists, @@ -4215,9 +4381,19 @@ func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]str for name, confStr := range confMap { conf, err := ParseEventBufferTupleString(confStr) if err != nil { - return fmt.Errorf("unable to parse %s: %s", BPFMapEventBuffers, err) + return fmt.Errorf("unable to parse %s: %w", BPFMapEventBuffers, err) } confs[name] = conf } return nil } + +func (d *DaemonConfig) EnforceLXCFibLookup() bool { + // See https://github.com/cilium/cilium/issues/27343 for the symptoms. + // + // We want to enforce FIB lookup if EndpointRoutes are enabled, because + // this was a config dependency change which caused different behaviour + // since v1.14.0-snapshot.2. We will remove this hack later, once we + // have auto-device detection on by default. 
+ return d.EnableEndpointRoutes +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go index 5e70c3cd62..e9df056094 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go @@ -10,7 +10,7 @@ import ( "github.com/cilium/cilium/pkg/ip" "github.com/cilium/cilium/pkg/labels" - cidrpkg "github.com/cilium/cilium/pkg/labels/cidr" + "github.com/cilium/cilium/pkg/option" ) // +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/([0-9]|[1-2][0-9]|3[0-2])$|^s*((([0-9A-Fa-f]{1,4}:){7}(:|([0-9A-Fa-f]{1,4})))|(([0-9A-Fa-f]{1,4}:){6}:([0-9A-Fa-f]{1,4})?)|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){0,1}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){0,2}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){0,3}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){0,4}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){0,5}):([0-9A-Fa-f]{1,4})?))|(:(:|((:[0-9A-Fa-f]{1,4}){1,7}))))(%.+)?s*/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$` @@ -19,18 +19,10 @@ import ( // Example: 192.0.2.1/32 type CIDR string -// CIDRMatchAll is a []CIDR that matches everything -var CIDRMatchAll = []CIDR{CIDR("0.0.0.0/0"), CIDR("::/0")} - -// MatchesAll determines whether the CIDR matches all traffic. 
-func (c CIDR) MatchesAll() bool { - for _, wildcard := range CIDRMatchAll { - if c == wildcard { - return true - } - } - return false -} +var ( + ipv4All = CIDR("0.0.0.0/0") + ipv6All = CIDR("::/0") +) // CIDRRule is a rule that specifies a CIDR prefix to/from which outside // communication is allowed, along with an optional list of subnets within that @@ -83,20 +75,39 @@ func (s CIDRSlice) GetAsEndpointSelectors() EndpointSelectorSlice { // If multiple CIDRs representing reserved:world are in this CIDRSlice, // we only have to add the EndpointSelector representing reserved:world // once. - var hasWorldBeenAdded bool + var hasIPv4AllBeenAdded, hasIPv6AllBeenAdded bool slice := EndpointSelectorSlice{} for _, cidr := range s { - if cidr.MatchesAll() && !hasWorldBeenAdded { - hasWorldBeenAdded = true - slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + if cidr == ipv4All { + hasIPv4AllBeenAdded = true } - lbl, err := cidrpkg.IPStringToLabel(string(cidr)) + if cidr == ipv6All { + hasIPv6AllBeenAdded = true + } + lbl, err := labels.IPStringToLabel(string(cidr)) if err == nil { slice = append(slice, NewESFromLabels(lbl)) } // TODO: Log the error? } + if option.Config.IsDualStack() { + // If Cilium is in dual-stack mode then world-ipv4 and + // world-ipv6 need to be distinguished from one another. 
+ if hasIPv4AllBeenAdded && hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } + if hasIPv4AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv4]) + } + if hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv6]) + } + } else if option.Config.EnableIPv4 && hasIPv4AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } else if option.Config.EnableIPv6 && hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } return slice } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go index 2af43d19df..f4eac3d721 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go @@ -148,15 +148,6 @@ type EgressRule struct { // ToFQDN rule will not apply to that IP. // Note: ToFQDN cannot occur in the same policy as other To* rules. // - // The current implementation has a number of limitations: - // - The DNS resolution originates from cilium-agent, and not from the pods. - // Differences between the responses seen by cilium agent and a particular - // pod will whitelist the incorrect IP. - // - DNS TTLs are ignored, and cilium-agent will repoll on a short interval - // (5 seconds). Each change to the DNS data will trigger a policy - // regeneration. This may result in delayed updates to the policy for an - // endpoint when the data changes often or the system is under load. 
- // // +kubebuilder:validation:Optional ToFQDNs FQDNSelectorSlice `json:"toFQDNs,omitempty"` diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go index a12d12e934..084d581518 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go @@ -23,6 +23,16 @@ const ( // endpoint's cluster EntityWorld Entity = "world" + // EntityWorldIPv4 is an entity that represents traffic external to + // endpoint's cluster, specifically an IPv4 endpoint, to distinguish + // it from IPv6 in dual-stack mode. + EntityWorldIPv4 Entity = "world-ipv4" + + // EntityWorldIPv6 is an entity that represents traffic external to + // endpoint's cluster, specifically an IPv6 endpoint, to distinguish + // it from IPv4 in dual-stack mode. + EntityWorldIPv6 Entity = "world-ipv6" + // EntityCluster is an entity that represents traffic within the // endpoint's cluster, to endpoints not managed by cilium EntityCluster Entity = "cluster" @@ -55,6 +65,10 @@ const ( var ( endpointSelectorWorld = NewESFromLabels(labels.NewLabel(labels.IDNameWorld, "", labels.LabelSourceReserved)) + endpointSelectorWorldIPv4 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv4, "", labels.LabelSourceReserved)) + + endpointSelectorWorldIPv6 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv6, "", labels.LabelSourceReserved)) + endpointSelectorHost = NewESFromLabels(labels.NewLabel(labels.IDNameHost, "", labels.LabelSourceReserved)) endpointSelectorInit = NewESFromLabels(labels.NewLabel(labels.IDNameInit, "", labels.LabelSourceReserved)) @@ -75,7 +89,9 @@ var ( // policies to selectors EntitySelectorMapping = map[Entity]EndpointSelectorSlice{ EntityAll: {WildcardEndpointSelector}, - EntityWorld: {endpointSelectorWorld}, + EntityWorld: {endpointSelectorWorld, endpointSelectorWorldIPv4, endpointSelectorWorldIPv6}, + EntityWorldIPv4: {endpointSelectorWorldIPv4}, + 
EntityWorldIPv6: {endpointSelectorWorldIPv6}, EntityHost: {endpointSelectorHost}, EntityInit: {endpointSelectorInit}, EntityIngress: {endpointSelectorIngress}, diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go index a9ad79e6ad..fb3174ead0 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go @@ -7,9 +7,9 @@ import ( "context" "fmt" "net/netip" - "sync" "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/lock" ) const ( @@ -17,7 +17,7 @@ const ( ) var ( - providers = sync.Map{} // map with the list of providers to callback to retrieve info from. + providers lock.Map[string, GroupProviderFunc] // map with the list of providers to callback to retrieve info from. ) // GroupProviderFunc is a func that need to be register to be able to @@ -50,18 +50,14 @@ func (group *ToGroups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) { var addrs []netip.Addr // Get per provider CIDRSet if group.AWS != nil { - callbackInterface, ok := providers.Load(AWSProvider) + callback, ok := providers.Load(AWSProvider) if !ok { return nil, fmt.Errorf("Provider %s is not registered", AWSProvider) } - callback, ok := callbackInterface.(GroupProviderFunc) - if !ok { - return nil, fmt.Errorf("Provider callback for %s is not a valid instance", AWSProvider) - } awsAddrs, err := callback(ctx, group) if err != nil { return nil, fmt.Errorf( - "Cannot retrieve data from %s provider: %s", + "Cannot retrieve data from %s provider: %w", AWSProvider, err) } addrs = append(addrs, awsAddrs...) 
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/http.go b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go index fa7ee9173b..9f0daa4f05 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/http.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go @@ -80,7 +80,10 @@ type PortRuleHTTP struct { Method string `json:"method,omitempty"` // Host is an extended POSIX regex matched against the host header of a - // request, e.g. "foo.com" + // request. Examples: + // + // - foo.bar.com will match the host fooXbar.com or foo-bar.com + // - foo\.bar\.com will only match the host foo.bar.com // // If omitted or empty, the value of the host header is ignored. // diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go index d41adf1b63..a717f498a7 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go @@ -23,6 +23,11 @@ const ( PortProtocolAny = "0/ANY" ) +// IsAny returns true if an L4Proto represents ANY protocol +func (l4 L4Proto) IsAny() bool { + return l4 == ProtoAny || string(l4) == "" +} + // PortProtocol specifies an L4 port with an optional transport protocol type PortProtocol struct { // Port is an L4 port number. 
For now the string will be strictly @@ -56,7 +61,7 @@ func (p PortProtocol) Covers(other PortProtocol) bool { return false } if p.Protocol != other.Protocol { - return p.Protocol == "" || p.Protocol == ProtoAny + return p.Protocol.IsAny() } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go index d83d0b0f30..f0224b1f48 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go @@ -29,8 +29,6 @@ type Authentication struct { Mode AuthenticationMode `json:"mode"` } -// +kubebuilder:validation:Type=object - // Rule is a policy rule which must be applied to all endpoints which match the // labels contained in the endpointSelector // diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go index 3828334b51..b2db0b45d0 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go @@ -6,7 +6,7 @@ package api import ( "errors" "fmt" - "net" + "net/netip" "strconv" "strings" @@ -19,8 +19,6 @@ const ( maxICMPFields = 40 ) -type exists struct{} - // Sanitize validates and sanitizes a policy rule. Minor edits such as // capitalization of the protocol name are automatically fixed up. More // fundamental violations will cause an error to be returned. 
@@ -146,21 +144,16 @@ func (i *IngressRule) sanitize() error { } } - prefixLengths := map[int]exists{} for n := range i.FromCIDR { - prefixLength, err := i.FromCIDR[n].sanitize() - if err != nil { + if err := i.FromCIDR[n].sanitize(); err != nil { return err } - prefixLengths[prefixLength] = exists{} } for n := range i.FromCIDRSet { - prefixLength, err := i.FromCIDRSet[n].sanitize() - if err != nil { + if err := i.FromCIDRSet[n].sanitize(); err != nil { return err } - prefixLengths[prefixLength] = exists{} } for _, fromEntity := range i.FromEntities { @@ -255,20 +248,15 @@ func (e *EgressRule) sanitize() error { } } - prefixLengths := map[int]exists{} for i := range e.ToCIDR { - prefixLength, err := e.ToCIDR[i].sanitize() - if err != nil { + if err := e.ToCIDR[i].sanitize(); err != nil { return err } - prefixLengths[prefixLength] = exists{} } for i := range e.ToCIDRSet { - prefixLength, err := e.ToCIDRSet[i].sanitize() - if err != nil { + if err := e.ToCIDRSet[i].sanitize(); err != nil { return err } - prefixLengths[prefixLength] = exists{} } for _, toEntity := range e.ToEntities { @@ -424,7 +412,7 @@ func (pp *PortProtocol) sanitize() (isZero bool, err error) { } else { p, err := strconv.ParseUint(pp.Port, 0, 16) if err != nil { - return isZero, fmt.Errorf("Unable to parse port: %s", err) + return isZero, fmt.Errorf("Unable to parse port: %w", err) } isZero = p == 0 } @@ -447,68 +435,61 @@ func (ir *ICMPRule) verify() error { return nil } -// sanitize the given CIDR. If successful, returns the prefixLength specified -// in the cidr and nil. Otherwise, returns (0, nil). -func (c CIDR) sanitize() (prefixLength int, err error) { +// sanitize the given CIDR. 
+func (c CIDR) sanitize() error { strCIDR := string(c) if strCIDR == "" { - return 0, fmt.Errorf("IP must be specified") + return fmt.Errorf("IP must be specified") } - _, ipnet, err := net.ParseCIDR(strCIDR) - if err == nil { - var bits int - prefixLength, bits = ipnet.Mask.Size() - if prefixLength == 0 && bits == 0 { - return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", - ipnet.Mask.String()) - } - } else { - // Try to parse as a fully masked IP or an IP subnetwork - ip := net.ParseIP(strCIDR) - if ip == nil { - return 0, fmt.Errorf("Unable to parse CIDR: %s", err) + prefix, err := netip.ParsePrefix(strCIDR) + if err != nil { + _, err := netip.ParseAddr(strCIDR) + if err != nil { + return fmt.Errorf("unable to parse CIDR: %w", err) } + return nil + } + prefixLength := prefix.Bits() + if prefixLength < 0 { + return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix) } - return prefixLength, nil + return nil } // sanitize validates a CIDRRule by checking that the CIDR prefix itself is // valid, and ensuring that all of the exception CIDR prefixes are contained // within the allowed CIDR prefix. -func (c *CIDRRule) sanitize() (prefixLength int, err error) { - +func (c *CIDRRule) sanitize() error { // Only allow notation /. Note that this differs from // the logic in api.CIDR.Sanitize(). 
- _, cidrNet, err := net.ParseCIDR(string(c.Cidr)) + prefix, err := netip.ParsePrefix(string(c.Cidr)) if err != nil { - return 0, fmt.Errorf("Unable to parse CIDRRule %q: %s", c.Cidr, err) + return fmt.Errorf("Unable to parse CIDRRule %q: %w", c.Cidr, err) } - var bits int - prefixLength, bits = cidrNet.Mask.Size() - if prefixLength == 0 && bits == 0 { - return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", - cidrNet.Mask.String()) + prefixLength := prefix.Bits() + if prefixLength < 0 { + return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix) } // Ensure that each provided exception CIDR prefix is formatted correctly, // and is contained within the CIDR prefix to/from which we want to allow // traffic. for _, p := range c.ExceptCIDRs { - exceptCIDRAddr, _, err := net.ParseCIDR(string(p)) + except, err := netip.ParsePrefix(string(p)) if err != nil { - return 0, err + return err } // Note: this also checks that the allow CIDR prefix and the exception // CIDR prefixes are part of the same address family. - if !cidrNet.Contains(exceptCIDRAddr) { - return 0, fmt.Errorf("allow CIDR prefix %s does not contain "+ + if !prefix.Contains(except.Addr()) { + return fmt.Errorf("allow CIDR prefix %s does not contain "+ "exclude CIDR prefix %s", c.Cidr, p) } } - return prefixLength, nil + return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go index 6da00c9a32..c23aa0d9c2 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go @@ -164,7 +164,6 @@ func (n EndpointSelector) GetMatch(key string) ([]string, bool) { func labelSelectorToRequirements(labelSelector *slim_metav1.LabelSelector) *k8sLbls.Requirements { selector, err := slim_metav1.LabelSelectorAsSelector(labelSelector) if err != nil { - metrics.PolicyImportErrorsTotal.Inc() // Deprecated in Cilium 1.14, to be removed in 1.15. 
metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeFail).Inc() log.WithError(err).WithField(logfields.EndpointLabelSelector, logfields.Repr(labelSelector)).Error("unable to construct selector in label selector") @@ -234,6 +233,8 @@ var ( labels.IDNameHost: newReservedEndpointSelector(labels.IDNameHost), labels.IDNameRemoteNode: newReservedEndpointSelector(labels.IDNameRemoteNode), labels.IDNameWorld: newReservedEndpointSelector(labels.IDNameWorld), + labels.IDNameWorldIPv4: newReservedEndpointSelector(labels.IDNameWorldIPv4), + labels.IDNameWorldIPv6: newReservedEndpointSelector(labels.IDNameWorldIPv6), } ) @@ -342,9 +343,9 @@ func (n *EndpointSelector) ConvertToLabelSelectorRequirementSlice() []slim_metav // sanitize returns an error if the EndpointSelector's LabelSelector is invalid. func (n *EndpointSelector) sanitize() error { - errList := validation.ValidateLabelSelector(n.LabelSelector, nil) + errList := validation.ValidateLabelSelector(n.LabelSelector, validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, nil) if len(errList) > 0 { - return fmt.Errorf("invalid label selector: %s", errList.ToAggregate().Error()) + return fmt.Errorf("invalid label selector: %w", errList.ToAggregate()) } return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go index 7424d34005..674f9c2508 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go @@ -146,7 +146,7 @@ const ( ForceNamespace Option = iota ) -func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) string { +func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) (name string, updated bool) { forceNamespace := false for _, option := range options { switch option { @@ -157,7 +157,7 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options 
...O idx := strings.IndexRune(resourceName, '/') if resourceName == "" || idx >= 0 && (!forceNamespace || (idx == len(namespace) && strings.HasPrefix(resourceName, namespace))) { - return resourceName + return resourceName, false } var sb strings.Builder @@ -168,5 +168,5 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options ...O sb.WriteRune('/') sb.WriteString(resourceName) - return sb.String() + return sb.String(), true } diff --git a/vendor/github.com/cilium/cilium/pkg/promise/promise.go b/vendor/github.com/cilium/cilium/pkg/promise/promise.go index 1164b9cf13..1ca251eb07 100644 --- a/vendor/github.com/cilium/cilium/pkg/promise/promise.go +++ b/vendor/github.com/cilium/cilium/pkg/promise/promise.go @@ -130,3 +130,14 @@ func Map[A, B any](p Promise[A], transform func(A) B) Promise[B] { return transform(v), nil }) } + +// MapError transforms the error of a rejected promise with the provided function. +func MapError[A any](p Promise[A], transform func(error) error) Promise[A] { + return wrappedPromise[A](func(ctx context.Context) (out A, err error) { + v, err := p.Await(ctx) + if err != nil { + err = transform(err) + } + return v, err + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go index 99240e6262..cc00c25ac5 100644 --- a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go +++ b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go @@ -5,11 +5,11 @@ package safetime import ( "runtime" - "time" "github.com/sirupsen/logrus" "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/time" ) // TimeSinceSafe returns the duration since t. 
If the duration is negative, diff --git a/vendor/github.com/cilium/cilium/pkg/slices/slices.go b/vendor/github.com/cilium/cilium/pkg/slices/slices.go index 243a00819e..be9652454d 100644 --- a/vendor/github.com/cilium/cilium/pkg/slices/slices.go +++ b/vendor/github.com/cilium/cilium/pkg/slices/slices.go @@ -4,10 +4,10 @@ package slices import ( + "slices" "sort" "golang.org/x/exp/constraints" - "golang.org/x/exp/slices" ) // Unique deduplicates the elements in the input slice, preserving their ordering and diff --git a/vendor/github.com/cilium/cilium/pkg/source/source.go b/vendor/github.com/cilium/cilium/pkg/source/source.go new file mode 100644 index 0000000000..4156105ab5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/source/source.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package source + +// Source describes the source of a definition +type Source string + +const ( + // Unspec is used when the source is unspecified + Unspec Source = "unspec" + + // KubeAPIServer is the source used for state which represents the + // kube-apiserver, such as the IPs associated with it. This is not to be + // confused with the Kubernetes source. + // KubeAPIServer state has the strongest ownership and can only be + // overwritten by itself. + KubeAPIServer Source = "kube-apiserver" + + // Local is the source used for state derived from local agent state. + // Local state has the second strongest ownership, behind KubeAPIServer. + Local Source = "local" + + // KVStore is the source used for state derived from a key value store. + // State in the key value stored takes precedence over orchestration + // system state such as Kubernetes. 
+ KVStore Source = "kvstore" + + // CustomResource is the source used for state derived from Kubernetes + // custom resources + CustomResource Source = "custom-resource" + + // Kubernetes is the source used for state derived from Kubernetes + Kubernetes Source = "k8s" + + // LocalAPI is the source used for state derived from the API served + // locally on the node. + LocalAPI Source = "api" + + // Generated is the source used for generated state which can be + // overwritten by all other sources, except for restored (and unspec). + Generated Source = "generated" + + // Restored is the source used for restored state from data left behind + // by the previous agent instance. Can be overwritten by all other + // sources (except for unspec). + Restored Source = "restored" +) + +// AllowOverwrite returns true if new state from a particular source is allowed +// to overwrite existing state from another source +func AllowOverwrite(existing, new Source) bool { + switch existing { + + // KubeAPIServer state can only be overwritten by other kube-apiserver + // state. + case KubeAPIServer: + return new == KubeAPIServer + + // Local state can only be overwritten by other local state or + // kube-apiserver state. + case Local: + return new == Local || new == KubeAPIServer + + // KVStore can be overwritten by other kvstore, local state, or + // kube-apiserver state. + case KVStore: + return new == KVStore || new == Local || new == KubeAPIServer + + // Custom-resource state can be overwritten by other CRD, kvstore, + // local or kube-apiserver state. + case CustomResource: + return new == CustomResource || new == KVStore || new == Local || new == KubeAPIServer + + // Kubernetes state can be overwritten by everything except local API, + // generated, restored and unspecified state. 
+ case Kubernetes: + return new != LocalAPI && new != Generated && new != Restored && new != Unspec + + // Local API state can be overwritten by everything except restored, + // generated and unspecified state + case LocalAPI: + return new != Generated && new != Restored && new != Unspec + + // Generated can be overwritten by everything except by Restored and + // Unspecified + case Generated: + return new != Restored && new != Unspec + + // Restored can be overwritten by everything except by Unspecified + case Restored: + return new != Unspec + + // Unspecified state can be overwritten by everything + case Unspec: + return true + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go index e3f7dbb6cd..2557233533 100644 --- a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go +++ b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go @@ -4,12 +4,11 @@ package spanstat import ( - "time" - "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/safetime" + "github.com/cilium/cilium/pkg/time" ) var ( diff --git a/vendor/github.com/cilium/cilium/pkg/stream/observable.go b/vendor/github.com/cilium/cilium/pkg/stream/observable.go new file mode 100644 index 0000000000..22e96af95d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/stream/observable.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// The stream package provides utilities for working with observable streams. +// Any type that implements the Observable interface can be transformed and +// consumed with these utilities. +package stream + +import "context" + +// Observable defines the Observe method for observing a stream of values. +// +// Also see https://reactivex.io/documentation/observable.html for in-depth +// description of observables. 
+// +// For interactive diagrams see https://rxmarbles.com/. +type Observable[T any] interface { + // Observe a stream of values as long as the given context is valid. + // 'next' is called for each item, and finally 'complete' is called + // when the stream is complete, or an error has occurred. + // + // Observable implementations are allowed to call 'next' and 'complete' + // from any goroutine, but never concurrently. + Observe(ctx context.Context, next func(T), complete func(error)) +} + +// FuncObservable implements the Observable interface with a function. +// +// This provides a convenient way of creating new observables without having +// to introduce a new type: +// +// var Ones Observable[int] = +// FuncObservable[int]( +// func(ctx context.Context, next func(int), complete func(error)) { +// go func() { +// defer complete(nil) +// for ctx.Err() == nil { +// next(1) +// } +// }() +// }) +// +// versus with a new type: +// +// type onesObservable struct {} +// +// func (o onesObservable) Observe(ctx context.Context, next func(int), complete func(error)) { +// go func() { +// defer complete(nil) +// for ctx.Err() == nil { +// next(1) +// } +// }() +// } +type FuncObservable[T any] func(context.Context, func(T), func(error)) + +func (f FuncObservable[T]) Observe(ctx context.Context, next func(T), complete func(error)) { + f(ctx, next, complete) +} diff --git a/vendor/github.com/cilium/cilium/pkg/stream/operators.go b/vendor/github.com/cilium/cilium/pkg/stream/operators.go new file mode 100644 index 0000000000..c12e404008 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/stream/operators.go @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + + "golang.org/x/time/rate" + + "github.com/cilium/cilium/pkg/time" +) + +// +// Operators transform the observable stream. +// + +// Map applies a function onto values of an observable and emits the resulting values. 
+// +// Map(Range(1,4), func(x int) int { return x * 2}) +// => [2,4,6] +func Map[A, B any](src Observable[A], apply func(A) B) Observable[B] { + return FuncObservable[B]( + func(ctx context.Context, next func(B), complete func(error)) { + src.Observe( + ctx, + func(a A) { next(apply(a)) }, + complete) + }) +} + +// Filter only emits the values for which the provided predicate returns true. +// +// Filter(Range(1,4), func(x int) int { return x%2 == 0 }) +// => [2] +func Filter[T any](src Observable[T], pred func(T) bool) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + src.Observe( + ctx, + func(x T) { + if pred(x) { + next(x) + } + }, + complete) + }) +} + +// Reduce takes an initial state, and a function 'reduce' that is called on each element +// along with a state and returns an observable with a single item: the state produced +// by the last call to 'reduce'. +// +// Reduce(Range(1,4), 0, func(sum, item int) int { return sum + item }) +// => [(0+1+2+3)] => [6] +func Reduce[Item, Result any](src Observable[Item], init Result, reduce func(Result, Item) Result) Observable[Result] { + result := init + return FuncObservable[Result]( + func(ctx context.Context, next func(Result), complete func(error)) { + src.Observe( + ctx, + func(x Item) { + result = reduce(result, x) + }, + func(err error) { + if err == nil { + next(result) + } + complete(err) + }) + }) +} + +// Distinct skips adjacent equal values. +// +// Distinct(FromSlice([]int{1,1,2,2,3}) +// => [1,2,3] +func Distinct[T comparable](src Observable[T]) Observable[T] { + var prev T + first := true + return Filter(src, func(item T) bool { + if first { + first = false + prev = item + return true + } + eq := prev == item + prev = item + return !eq + }) +} + +// RetryFunc decides whether the processing should be retried given the error +type RetryFunc func(err error) bool + +// Retry resubscribes to the observable if it completes with an error. 
+func Retry[T any](src Observable[T], shouldRetry RetryFunc) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + var observe func() + observe = func() { + src.Observe( + ctx, + next, + func(err error) { + if err != nil && shouldRetry(err) { + observe() + } else { + complete(err) + } + }) + } + observe() + }) +} + +// AlwaysRetry always asks for a retry regardless of the error. +func AlwaysRetry(err error) bool { + return true +} + +// BackoffRetry retries with an exponential backoff. +func BackoffRetry(shouldRetry RetryFunc, minBackoff, maxBackoff time.Duration) RetryFunc { + backoff := minBackoff + return func(err error) bool { + time.Sleep(backoff) + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + return shouldRetry(err) + } + +} + +// LimitRetries limits the number of retries with the given retry method. +// e.g. LimitRetries(BackoffRetry(time.Millisecond, time.Second), 5) +func LimitRetries(shouldRetry RetryFunc, numRetries int) RetryFunc { + return func(err error) bool { + if numRetries <= 0 { + return false + } + numRetries-- + return shouldRetry(err) + } +} + +// ToMulticast makes 'src' a multicast observable, e.g. each observer will observe +// the same sequence. Useful for fanning out items to multiple observers from a source +// that is consumed by the act of observing. +// +// mcast, connect := ToMulticast(FromChannel(values)) +// a := ToSlice(mcast) +// b := ToSlice(mcast) +// connect(ctx) // start! +// => a == b +func ToMulticast[T any](src Observable[T], opts ...MulticastOpt) (mcast Observable[T], connect func(context.Context)) { + mcast, next, complete := Multicast[T](opts...) + connect = func(ctx context.Context) { + src.Observe(ctx, next, complete) + } + return mcast, connect +} + +// Throttle limits the rate at which items are emitted. 
+func Throttle[T any](src Observable[T], ratePerSecond float64, burst int) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + limiter := rate.NewLimiter(rate.Limit(ratePerSecond), burst) + var limiterErr error + subCtx, cancel := context.WithCancel(ctx) + src.Observe( + subCtx, + func(item T) { + limiterErr = limiter.Wait(ctx) + if limiterErr != nil { + cancel() + return + } + next(item) + }, + func(err error) { + if limiterErr != nil { + complete(limiterErr) + } else { + complete(err) + } + + }, + ) + }) +} + +// Debounce emits an item only after the specified duration has lapsed since +// the previous item was emitted. Only the latest item is emitted. +// +// In: a b c d e |-> +// Out: a d e |-> +func Debounce[T any](src Observable[T], duration time.Duration) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + errs := make(chan error, 1) + items := ToChannel(ctx, src, WithErrorChan(errs)) + go func() { + defer close(errs) + + timer := time.NewTimer(duration) + defer timer.Stop() + + timerElapsed := true // Do not delay the first item. 
+ var latest *T + + for { + select { + case err := <-errs: + complete(err) + return + + case item, ok := <-items: + if !ok { + items = nil + latest = nil + continue + } + + if timerElapsed { + next(item) + timerElapsed = false + latest = nil + timer.Reset(duration) + } else { + latest = &item + } + + case <-timer.C: + if latest != nil { + next(*latest) + latest = nil + timer.Reset(duration) + } else { + timerElapsed = true + } + } + } + }() + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/stream/sinks.go b/vendor/github.com/cilium/cilium/pkg/stream/sinks.go new file mode 100644 index 0000000000..23c5ee2270 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/stream/sinks.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + "io" + "sync" + "sync/atomic" +) + +// +// Sinks: operators that consume the observable to produce a value. +// + +// First returns the first item from 'src' observable and then cancels +// the subscription. Blocks until first item is observed or the stream +// is completed. If the observable completes without emitting items +// then io.EOF error is returned. +func First[T any](ctx context.Context, src Observable[T]) (item T, err error) { + subCtx, cancel := context.WithCancel(ctx) + var taken atomic.Bool + errs := make(chan error) + src.Observe(subCtx, + func(x T) { + if !taken.CompareAndSwap(false, true) { + return + } + item = x + cancel() + }, + func(err error) { + errs <- err + close(errs) + }) + + err = <-errs + + if taken.Load() { + // We got the item, ignore any error. + err = nil + } else if err == nil { + // No error and no item => EOF + err = io.EOF + } + + return +} + +// Last returns the last item from 'src' observable. Blocks until +// the stream has been completed. If no items are observed then +// io.EOF error is returned. 
+func Last[T any](ctx context.Context, src Observable[T]) (item T, err error) { + errs := make(chan error) + var taken atomic.Bool + src.Observe( + ctx, + func(x T) { + item = x + taken.Store(true) + }, + func(err error) { + errs <- err + close(errs) + }) + + err = <-errs + if taken.Load() { + // We got the item, ignore any error. + err = nil + } else if err == nil { + // No error and no item => EOF + err = io.EOF + } + return item, err +} + +// ToSlice converts an Observable into a slice. +// +// ToSlice(ctx, Range(1,4)) +// => ([]int{1,2,3}, nil) +func ToSlice[T any](ctx context.Context, src Observable[T]) (items []T, err error) { + errs := make(chan error) + items = make([]T, 0) + src.Observe( + ctx, + func(item T) { + items = append(items, item) + }, + func(err error) { + errs <- err + close(errs) + }) + return items, <-errs +} + +type toChannelOpts struct { + bufferSize int + errorChan chan error +} + +type ToChannelOpt func(*toChannelOpts) + +// WithBufferSize sets the buffer size of the channel returned by ToChannel. +func WithBufferSize(n int) ToChannelOpt { + return func(o *toChannelOpts) { + o.bufferSize = n + } +} + +// WithErrorChan asks ToChannel to send completion error to the provided channel. +func WithErrorChan(errCh chan error) ToChannelOpt { + return func(o *toChannelOpts) { + o.errorChan = errCh + } +} + +// ToChannel converts an observable into a channel. +// When the provided context is cancelled the underlying subscription is cancelled +// and the channel is closed. To receive completion errors use [WithErrorChan]. 
+// +// items <- ToChannel(ctx, Range(1,4)) +// a := <- items +// b := <- items +// c := <- items +// _, ok := <- items +// => a=1, b=2, c=3, ok=false +func ToChannel[T any](ctx context.Context, src Observable[T], opts ...ToChannelOpt) <-chan T { + var o toChannelOpts + for _, opt := range opts { + opt(&o) + } + items := make(chan T, o.bufferSize) + src.Observe( + ctx, + func(item T) { items <- item }, + func(err error) { + close(items) + if o.errorChan != nil { + o.errorChan <- err + } + }) + return items +} + +// Discard discards all items from 'src'. +func Discard[T any](ctx context.Context, src Observable[T]) { + src.Observe(ctx, + func(item T) {}, + func(err error) {}) +} + +// ObserveWithWaitGroup is like Observe(), but adds to a WaitGroup and calls +// Done() when complete. +func ObserveWithWaitGroup[T any](ctx context.Context, wg *sync.WaitGroup, src Observable[T], next func(T), complete func(error)) { + wg.Add(1) + src.Observe( + ctx, + next, + func(err error) { + complete(err) + wg.Done() + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/stream/sources.go b/vendor/github.com/cilium/cilium/pkg/stream/sources.go new file mode 100644 index 0000000000..a9e3f06e24 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/stream/sources.go @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + + "github.com/cilium/cilium/pkg/lock" +) + +// Just creates an observable that emits a single item and completes. +// +// xs, err := ToSlice(ctx, Just(1)) +// => xs == []int{1}, err == nil +func Just[T any](item T) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + if err := ctx.Err(); err != nil { + complete(err) + } else { + next(item) + complete(nil) + } + }() + }) +} + +// Stuck creates an observable that never emits anything and +// just waits for the context to be cancelled. +// Mainly meant for testing. 
+func Stuck[T any]() Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + <-ctx.Done() + complete(ctx.Err()) + }() + }) +} + +// Error creates an observable that fails immediately with given error. +// +// failErr = errors.New("fail") +// xs, err := ToSlice(ctx, Error[int](failErr)) +// => xs == []int{}, err == failErr +func Error[T any](err error) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go complete(err) + }) +} + +// Empty creates an "empty" observable that completes immediately. +// +// xs, err := ToSlice(Empty[int]()) +// => xs == []int{}, err == nil +func Empty[T any]() Observable[T] { + return Error[T](nil) +} + +// FromSlice converts a slice into an Observable. +// +// ToSlice(ctx, FromSlice([]int{1,2,3}) +// => []int{1,2,3} +func FromSlice[T any](items []T) Observable[T] { + // Emit items in chunks to reduce overhead of mutex in ctx.Err(). + const chunkSize = 64 + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + for chunk := 0; chunk < len(items); chunk += chunkSize { + if err := ctx.Err(); err != nil { + complete(err) + return + } + for i := chunk; i < len(items) && i < chunk+chunkSize; i++ { + next(items[i]) + } + } + complete(nil) + }() + }) +} + +// FromChannel creates an observable from a channel. The channel is consumed +// by the first observer. 
+// +// values := make(chan int) +// go func() { +// values <- 1 +// values <- 2 +// values <- 3 +// close(values) +// }() +// obs := FromChannel(values) +// xs, err := ToSlice(ctx, obs) +// => xs == []int{1,2,3}, err == nil +// +// xs, err = ToSlice(ctx, obs) +// => xs == []int{}, err == nil +func FromChannel[T any](in <-chan T) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + done := ctx.Done() + for { + select { + case <-done: + complete(ctx.Err()) + return + case v, ok := <-in: + if !ok { + complete(nil) + return + } + next(v) + } + } + }() + }) +} + +// Range creates an observable that emits integers in range from...to-1. +// +// ToSlice(ctx, Range(1,2,3)) => []int{1,2,3} +func Range(from, to int) Observable[int] { + return FuncObservable[int]( + func(ctx context.Context, next func(int), complete func(error)) { + go func() { + for i := from; i < to; i++ { + if ctx.Err() != nil { + break + } + next(i) + } + complete(ctx.Err()) + }() + }) +} + +type mcastSubscriber[T any] struct { + next func(T) + complete func() +} + +type MulticastOpt func(o *mcastOpts) + +type mcastOpts struct { + emitLatest bool +} + +func (o mcastOpts) apply(opts []MulticastOpt) mcastOpts { + for _, opt := range opts { + opt(&o) + } + return o +} + +// Multicast options +var ( + // Emit the latest seen item when subscribing. + EmitLatest = func(o *mcastOpts) { o.emitLatest = true } +) + +// Multicast creates an observable that "multicasts" the emitted items to all observers. 
+// +// mcast, next, complete := Multicast[int]() +// next(1) // no observers, none receives this +// sub1 := ToChannel(ctx, mcast, WithBufferSize(10)) +// sub2 := ToChannel(ctx, mcast, WithBufferSize(10)) +// next(2) +// next(3) +// complete(nil) +// => sub1 == sub2 == [2,3] +// +// mcast, next, complete = Multicast[int](EmitLatest) +// next(1) +// next(2) // "EmitLatest" tells Multicast to keep this +// x, err := First(ctx, mcast) +// => x == 2, err == nil +func Multicast[T any](opts ...MulticastOpt) (mcast Observable[T], next func(T), complete func(error)) { + var ( + mu lock.Mutex + subId int + subs = make(map[int]mcastSubscriber[T]) + latestValue T + completed bool + completeErr error + haveLatest bool + opt = mcastOpts{}.apply(opts) + ) + + next = func(item T) { + mu.Lock() + defer mu.Unlock() + if completed { + return + } + if opt.emitLatest { + latestValue = item + haveLatest = true + } + for _, sub := range subs { + sub.next(item) + } + } + + complete = func(err error) { + mu.Lock() + defer mu.Unlock() + completed = true + completeErr = err + for _, sub := range subs { + sub.complete() + } + subs = nil + } + + mcast = FuncObservable[T]( + func(ctx context.Context, subNext func(T), subComplete func(error)) { + mu.Lock() + if completed { + mu.Unlock() + go subComplete(completeErr) + return + } + + subCtx, cancel := context.WithCancel(ctx) + thisId := subId + subId++ + subs[thisId] = mcastSubscriber[T]{ + subNext, + cancel, + } + + // Continue subscribing asynchronously so caller is not blocked. + go func() { + if opt.emitLatest && haveLatest { + subNext(latestValue) + } + mu.Unlock() + + // Wait for cancellation by observer, or completion from upstream. + <-subCtx.Done() + + // Remove the observer and complete. 
+ var err error + mu.Lock() + delete(subs, thisId) + if completed { + err = completeErr + } else { + err = subCtx.Err() + } + mu.Unlock() + subComplete(err) + }() + }) + + return +} diff --git a/vendor/github.com/cilium/cilium/pkg/time/time.go b/vendor/github.com/cilium/cilium/pkg/time/time.go new file mode 100644 index 0000000000..694b21f031 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/time/time.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// package time is a wrapper for the stdlib time library that aliases most +// underlying types, but allows overrides for testing purposes. +// +// Synced to go-1.20.7. +package time + +import ( + "time" +) + +const ( + Layout = time.Layout + ANSIC = time.ANSIC + UnixDate = time.UnixDate + RubyDate = time.RubyDate + RFC822 = time.RFC822 + RFC822Z = time.RFC822Z + RFC850 = time.RFC850 + RFC1123 = time.RFC1123 + RFC1123Z = time.RFC1123Z + RFC3339 = time.RFC3339 + RFC3339Nano = time.RFC3339Nano + Kitchen = time.Kitchen + Stamp = time.Stamp + StampMilli = time.StampMilli + StampMicro = time.StampMicro + StampNano = time.StampNano + DateTime = time.DateTime + DateOnly = time.DateOnly + TimeOnly = time.TimeOnly + + Nanosecond = time.Nanosecond + Microsecond = time.Microsecond + Millisecond = time.Millisecond + Second = time.Second + Minute = time.Minute + Hour = time.Hour +) + +var ( + ParseDuration = time.ParseDuration + Since = time.Since + Until = time.Until + FixedZone = time.FixedZone + LoadLocation = time.LoadLocation + LoadLocationFromTZData = time.LoadLocationFromTZData + Date = time.Date + Now = time.Now + Parse = time.Parse + ParseInLocation = time.ParseInLocation + Unix = time.Unix + UnixMicro = time.UnixMicro + UnixMilli = time.UnixMilli +) + +type ( + Duration = time.Duration + Location = time.Location + Month = time.Month + ParseError = time.ParseError + Ticker = time.Ticker + Time = time.Time + Timer = time.Timer + Weekday = time.Weekday +) + +var ( + 
MaxInternalTimerDelay time.Duration +) + +// After overrides the stdlib time.After to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func After(d Duration) <-chan Time { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.After(d) +} + +// Sleep overrides the stdlib time.Sleep to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func Sleep(d time.Duration) { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + time.Sleep(d) +} + +// Tick overrides the stdlib time.Tick to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func Tick(d Duration) <-chan time.Time { + return NewTicker(d).C +} + +// NewTicker overrides the stdlib time.NewTicker to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. +func NewTicker(d Duration) *time.Ticker { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.NewTicker(d) +} + +// NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. +func NewTimer(d Duration) *time.Timer { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.NewTimer(d) +} + +// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum +// sleepiness. This function should only be used in cases where the timer firing +// early impacts correctness. If in doubt, you probably should use NewTimer. +func NewTimerWithoutMaxDelay(d Duration) *time.Timer { + return time.NewTimer(d) +} + +// AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. 
+func AfterFunc(d Duration, f func()) *time.Timer { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.AfterFunc(d, f) +} diff --git a/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go b/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go index 6b5e34534f..88474cfa7e 100644 --- a/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go +++ b/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go @@ -20,7 +20,7 @@ import ( func MustCompile(constraint string) semver.Range { verCheck, err := Compile(constraint) if err != nil { - panic(fmt.Errorf("cannot compile go-version constraint '%s' %s", constraint, err)) + panic(fmt.Errorf("cannot compile go-version constraint '%s': %w", constraint, err)) } return verCheck } @@ -36,7 +36,7 @@ func Compile(constraint string) (semver.Range, error) { func MustVersion(version string) semver.Version { ver, err := Version(version) if err != nil { - panic(fmt.Errorf("cannot compile go-version version '%s' %s", version, err)) + panic(fmt.Errorf("cannot compile go-version version '%s': %w", version, err)) } return ver } diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format new file mode 100644 index 0000000000..0ff4257606 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.clang-format @@ -0,0 +1,25 @@ +--- +Language: Cpp +BasedOnStyle: LLVM +AlignAfterOpenBracket: DontAlign +AlignConsecutiveAssignments: true +AlignEscapedNewlines: DontAlign +# mkdocs annotations in source code are written as trailing comments +# and alignment pushes these really far away from the content. 
+AlignTrailingComments: false +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: false +BreakBeforeBraces: Attach +IndentWidth: 4 +KeepEmptyLinesAtTheStartOfBlocks: false +TabWidth: 4 +UseTab: ForContinuationAndIndentation +ColumnLimit: 1000 +# Go compiler comments need to stay unindented. +CommentPragmas: '^go:.*' +# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64 +# and sorting makes this impossible. +SortIncludes: false +... diff --git a/vendor/github.com/cilium/ebpf/.gitattributes b/vendor/github.com/cilium/ebpf/.gitattributes new file mode 100644 index 0000000000..113f97b980 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitattributes @@ -0,0 +1 @@ +internal/sys/types.go linguist-generated=false diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore new file mode 100644 index 0000000000..b46162b8ec --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.o +!*_bpf*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml new file mode 100644 index 0000000000..65f91b910b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -0,0 +1,13 @@ +--- +linters: + disable-all: true + enable: + - goimports + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - typecheck + - unused + - gofmt diff --git a/vendor/github.com/cilium/ebpf/.vimto.toml b/vendor/github.com/cilium/ebpf/.vimto.toml new file mode 100644 index 0000000000..49a12dbc09 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.vimto.toml @@ -0,0 +1,12 @@ +kernel="ghcr.io/cilium/ci-kernels:stable" +smp="cpus=2" 
+memory="1G" +user="root" +setup=[ + "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup", + "/bin/sh -c 'modprobe bpf_testmod || true'", + "dmesg --clear", +] +teardown=[ + "dmesg --read-clear", +] diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS new file mode 100644 index 0000000000..ca65d23c09 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -0,0 +1,11 @@ +* @cilium/ebpf-lib-maintainers + +features/ @rgo3 +link/ @mmat11 + +perf/ @florianl +ringbuf/ @florianl + +btf/ @dylandreimerink + +cmd/bpf2go/ @mejedi diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..8e42838c5a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md new file mode 100644 index 0000000000..673a9ac290 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to ebpf-go + +Want to contribute to ebpf-go? There are a few things you need to know. + +We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started. 
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE new file mode 100644 index 0000000000..c637ae99c2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 0000000000..a56a03e394 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,3 @@ +# Maintainers + +Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md) diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile new file mode 100644 index 0000000000..d355eea71c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -0,0 +1,112 @@ +# The development version of clang is distributed as the 'clang' binary, +# while stable/released versions have a version number attached. +# Pin the default clang to a stable version. +CLANG ?= clang-17 +STRIP ?= llvm-strip-17 +OBJCOPY ?= llvm-objcopy-17 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ + +# Obtain an absolute path to the directory of the Makefile. +# Assume the Makefile is in the root of the repository. +REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) + +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") + +IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) +VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + +TARGETS := \ + testdata/loader-clang-11 \ + testdata/loader-clang-14 \ + testdata/loader-$(CLANG) \ + testdata/manyprogs \ + testdata/btf_map_init \ + testdata/invalid_map \ + testdata/raw_tracepoint \ + testdata/invalid_map_static \ + testdata/invalid_btf_map_init \ + testdata/strings \ + testdata/freplace \ + testdata/fentry_fexit \ + testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + testdata/kconfig \ + testdata/kconfig_config \ + testdata/kfunc \ + testdata/invalid-kfunc \ + testdata/kfunc-kmod \ + testdata/constants \ + testdata/errors \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt \ + btf/testdata/relocs_enum \ + cmd/bpf2go/testdata/minimal + +.PHONY: all clean container-all container-shell generate + +.DEFAULT_TARGET = container-all + +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + +${CONTAINER_ENGINE} run --rm -t ${CONTAINER_RUN_ARGS} \ + -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ + --env HOME="/tmp" \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" \ + make all + +# (debug) Drop the user into a shell inside the container as root. +# Set BPF2GO_ envs to make 'make generate' just work. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ + -v "${REPODIR}":/ebpf -w /ebpf \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" + +clean: + find "$(CURDIR)" -name "*.elf" -delete + find "$(CURDIR)" -name "*.o" -delete + +format: + find . 
-type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate + ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf + ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf + +generate: + go generate -run "internal/cmd/gentypes" ./... + go generate -skip "internal/cmd/gentypes" ./... + +testdata/loader-%-el.elf: testdata/loader.c + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +testdata/loader-%-eb.elf: testdata/loader.c + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +%-el.elf: %.c + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +%-eb.elf : %.c + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +.PHONY: update-kernel-deps +update-kernel-deps: export KERNEL_VERSION?=6.8 +update-kernel-deps: + ./testdata/sh/update-kernel-deps.sh + $(MAKE) container-all diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md new file mode 100644 index 0000000000..85871db1ae --- /dev/null +++ b/vendor/github.com/cilium/ebpf/README.md @@ -0,0 +1,72 @@ +# eBPF + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) + +![HoneyGopher](docs/ebpf/ebpf-go.png) + +ebpf-go is a pure Go library that provides utilities for loading, compiling, and +debugging eBPF programs. It has minimal external dependencies and is intended to +be used in long running processes. + +See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF +ecosystem. + +## Getting Started + +Please take a look at our [Getting Started] guide. + +[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of +eBPF and the library, and help shape the future of the project. + +## Getting Help + +The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page. 
+Please search for existing threads before starting a new one. Refrain from +opening issues on the bug tracker if you're just starting out or if you're not +sure if something is a bug in the library code. + +Alternatively, [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have other questions regarding the project. Note that this channel is ephemeral +and has its history erased past a certain point, which is less helpful for +others running into the same problem later. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. +* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY` +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. +* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. 
+ +## Requirements + +* A version of Go that is [supported by + upstream](https://golang.org/doc/devel/release.html#policy) +* CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed versions + are not supported. + +## License + +MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. + +[Getting Started]: https://ebpf-go.dev/guides/getting-started/ diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go new file mode 100644 index 0000000000..282233d327 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu.go @@ -0,0 +1,180 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp + +// Source of ALU / ALU64 / Branch operations +// +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 + +const sourceMask OpCode = 0x0008 + +// Source bitmask +const ( + // InvalidSource is returned by getters when invoked + // on non ALU / branch OpCodes. + InvalidSource Source = 0xffff + // ImmSource src is from constant + ImmSource Source = 0x0000 + // RegSource src is from register + RegSource Source = 0x0008 +) + +// The Endianness of a byte swap instruction. 
+type Endianness uint8 + +const endianMask = sourceMask + +// Endian flags +const ( + InvalidEndian Endianness = 0xff + // Convert to little endian + LE Endianness = 0x00 + // Convert to big endian + BE Endianness = 0x08 +) + +// ALUOp are ALU / ALU64 operations +// +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 + +const aluMask OpCode = 0x3ff0 + +const ( + // InvalidALUOp is returned by getters when invoked + // on non ALU OpCodes + InvalidALUOp ALUOp = 0xffff + // Add - addition + Add ALUOp = 0x0000 + // Sub - subtraction + Sub ALUOp = 0x0010 + // Mul - multiplication + Mul ALUOp = 0x0020 + // Div - division + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 + // Or - bitwise or + Or ALUOp = 0x0040 + // And - bitwise and + And ALUOp = 0x0050 + // LSh - bitwise shift left + LSh ALUOp = 0x0060 + // RSh - bitwise shift right + RSh ALUOp = 0x0070 + // Neg - sign/unsign signing bit + Neg ALUOp = 0x0080 + // Mod - modulo + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 + // Xor - bitwise xor + Xor ALUOp = 0x00a0 + // Mov - move value from one place to another + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 + // ArSh - arithmetic shift + ArSh ALUOp = 0x00c0 + // Swap - endian conversions + Swap ALUOp = 0x00d0 +) + +// HostTo converts from host to another endianness. 
+func HostTo(endian Endianness, dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)), + Dst: dst, + Constant: imm, + } +} + +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + +// Op returns the OpCode for an ALU operation with a given source. +func (op ALUOp) Op(source Source) OpCode { + return OpCode(ALU64Class).SetALUOp(op).SetSource(source) +} + +// Reg emits `dst (op) src`. +func (op ALUOp) Reg(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm emits `dst (op) value`. +func (op ALUOp) Imm(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op(ImmSource), + Dst: dst, + Constant: int64(value), + } +} + +// Op32 returns the OpCode for a 32-bit ALU operation with a given source. +func (op ALUOp) Op32(source Source) OpCode { + return OpCode(ALUClass).SetALUOp(op).SetSource(source) +} + +// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst. +func (op ALUOp) Reg32(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op32(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst. 
+func (op ALUOp) Imm32(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op32(ImmSource), + Dst: dst, + Constant: int64(value), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go new file mode 100644 index 0000000000..35b406bf3f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -0,0 +1,117 @@ +// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSource-65535] + _ = x[ImmSource-0] + _ = x[RegSource-8] +} + +const ( + _Source_name_0 = "ImmSource" + _Source_name_1 = "RegSource" + _Source_name_2 = "InvalidSource" +) + +func (i Source) String() string { + switch { + case i == 0: + return _Source_name_0 + case i == 8: + return _Source_name_1 + case i == 65535: + return _Source_name_2 + default: + return "Source(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidEndian-255] + _ = x[LE-0] + _ = x[BE-8] +} + +const ( + _Endianness_name_0 = "LE" + _Endianness_name_1 = "BE" + _Endianness_name_2 = "InvalidEndian" +) + +func (i Endianness) String() string { + switch { + case i == 0: + return _Endianness_name_0 + case i == 8: + return _Endianness_name_1 + case i == 255: + return _Endianness_name_2 + default: + return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidALUOp-65535] + _ = x[Add-0] + _ = x[Sub-16] + _ = x[Mul-32] + _ = x[Div-48] + _ = x[SDiv-304] + _ = x[Or-64] + _ = x[And-80] + _ = x[LSh-96] + _ = x[RSh-112] + _ = x[Neg-128] + _ = x[Mod-144] + _ = x[SMod-400] + _ = x[Xor-160] + _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] + _ = x[ArSh-192] + _ = x[Swap-208] +} + +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" + +var _ALUOp_map = map[ALUOp]string{ + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], +} + +func (i ALUOp) String() string { + if str, ok := _ALUOp_map[i]; ok { + return str + } + return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go new file mode 100644 index 0000000000..7031bdc276 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/doc.go @@ -0,0 +1,2 @@ +// Package asm is an assembler for eBPF bytecode. +package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go new file mode 100644 index 0000000000..84a40b2277 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -0,0 +1,250 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc + +// BuiltinFunc is a built-in eBPF function. 
+type BuiltinFunc int32 + +func (_ BuiltinFunc) Max() BuiltinFunc { + return maxBuiltinFunc - 1 +} + +// eBPF built-in functions +// +// You can regenerate this list using the following gawk script: +// +// /FN\(.+\),/ { +// match($1, /\(([a-z_0-9]+),/, r) +// split(r[1], p, "_") +// printf "Fn" +// for (i in p) { +// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) +// } +// print "" +// } +// +// The script expects include/uapi/linux/bpf.h as it's input. +const ( + FnUnspec BuiltinFunc = iota + FnMapLookupElem + FnMapUpdateElem + FnMapDeleteElem + FnProbeRead + FnKtimeGetNs + FnTracePrintk + FnGetPrandomU32 + FnGetSmpProcessorId + FnSkbStoreBytes + FnL3CsumReplace + FnL4CsumReplace + FnTailCall + FnCloneRedirect + FnGetCurrentPidTgid + FnGetCurrentUidGid + FnGetCurrentComm + FnGetCgroupClassid + FnSkbVlanPush + FnSkbVlanPop + FnSkbGetTunnelKey + FnSkbSetTunnelKey + FnPerfEventRead + FnRedirect + FnGetRouteRealm + FnPerfEventOutput + FnSkbLoadBytes + FnGetStackid + FnCsumDiff + FnSkbGetTunnelOpt + FnSkbSetTunnelOpt + FnSkbChangeProto + FnSkbChangeType + FnSkbUnderCgroup + FnGetHashRecalc + FnGetCurrentTask + FnProbeWriteUser + FnCurrentTaskUnderCgroup + FnSkbChangeTail + FnSkbPullData + FnCsumUpdate + FnSetHashInvalid + FnGetNumaNodeId + FnSkbChangeHead + FnXdpAdjustHead + FnProbeReadStr + FnGetSocketCookie + FnGetSocketUid + FnSetHash + FnSetsockopt + FnSkbAdjustRoom + FnRedirectMap + FnSkRedirectMap + FnSockMapUpdate + FnXdpAdjustMeta + FnPerfEventReadValue + FnPerfProgReadValue + FnGetsockopt + FnOverrideReturn + FnSockOpsCbFlagsSet + FnMsgRedirectMap + FnMsgApplyBytes + FnMsgCorkBytes + FnMsgPullData + FnBind + FnXdpAdjustTail + FnSkbGetXfrmState + FnGetStack + FnSkbLoadBytesRelative + FnFibLookup + FnSockHashUpdate + FnMsgRedirectHash + FnSkRedirectHash + FnLwtPushEncap + FnLwtSeg6StoreBytes + FnLwtSeg6AdjustSrh + FnLwtSeg6Action + FnRcRepeat + FnRcKeydown + FnSkbCgroupId + FnGetCurrentCgroupId + FnGetLocalStorage + FnSkSelectReuseport + 
FnSkbAncestorCgroupId + FnSkLookupTcp + FnSkLookupUdp + FnSkRelease + FnMapPushElem + FnMapPopElem + FnMapPeekElem + FnMsgPushData + FnMsgPopData + FnRcPointerRel + FnSpinLock + FnSpinUnlock + FnSkFullsock + FnTcpSock + FnSkbEcnSetCe + FnGetListenerSock + FnSkcLookupTcp + FnTcpCheckSyncookie + FnSysctlGetName + FnSysctlGetCurrentValue + FnSysctlGetNewValue + FnSysctlSetNewValue + FnStrtol + FnStrtoul + FnSkStorageGet + FnSkStorageDelete + FnSendSignal + FnTcpGenSyncookie + FnSkbOutput + FnProbeReadUser + FnProbeReadKernel + FnProbeReadUserStr + FnProbeReadKernelStr + FnTcpSendAck + FnSendSignalThread + FnJiffies64 + FnReadBranchRecords + FnGetNsCurrentPidTgid + FnXdpOutput + FnGetNetnsCookie + FnGetCurrentAncestorCgroupId + FnSkAssign + FnKtimeGetBootNs + FnSeqPrintf + FnSeqWrite + FnSkCgroupId + FnSkAncestorCgroupId + FnRingbufOutput + FnRingbufReserve + FnRingbufSubmit + FnRingbufDiscard + FnRingbufQuery + FnCsumLevel + FnSkcToTcp6Sock + FnSkcToTcpSock + FnSkcToTcpTimewaitSock + FnSkcToTcpRequestSock + FnSkcToUdp6Sock + FnGetTaskStack + FnLoadHdrOpt + FnStoreHdrOpt + FnReserveHdrOpt + FnInodeStorageGet + FnInodeStorageDelete + FnDPath + FnCopyFromUser + FnSnprintfBtf + FnSeqPrintfBtf + FnSkbCgroupClassid + FnRedirectNeigh + FnPerCpuPtr + FnThisCpuPtr + FnRedirectPeer + FnTaskStorageGet + FnTaskStorageDelete + FnGetCurrentTaskBtf + FnBprmOptsSet + FnKtimeGetCoarseNs + FnImaInodeHash + FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs + FnGetBranchSnapshot + FnTraceVprintk + FnSkcToUnixSock + FnKallsymsLookupName + FnFindVma + FnLoop + FnStrncmp + FnGetFuncArg + FnGetFuncRet + FnGetFuncArgCnt + FnGetRetval + FnSetRetval + FnXdpGetBuffLen + FnXdpLoadBytes + FnXdpStoreBytes + FnCopyFromUserTask + FnSkbSetTstamp + FnImaFileHash + FnKptrXchg + FnMapLookupPercpuElem + FnSkcToMptcpSock + FnDynptrFromMem 
+ FnRingbufReserveDynptr + FnRingbufSubmitDynptr + FnRingbufDiscardDynptr + FnDynptrRead + FnDynptrWrite + FnDynptrData + FnTcpRawGenSyncookieIpv4 + FnTcpRawGenSyncookieIpv6 + FnTcpRawCheckSyncookieIpv4 + FnTcpRawCheckSyncookieIpv6 + FnKtimeGetTaiNs + FnUserRingbufDrain + FnCgrpStorageGet + FnCgrpStorageDelete + + maxBuiltinFunc +) + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 0000000000..47150bc4f2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,235 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = 
x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] + _ = x[FnSkbOutput-111] + _ = x[FnProbeReadUser-112] + _ = x[FnProbeReadKernel-113] + _ = x[FnProbeReadUserStr-114] + _ = x[FnProbeReadKernelStr-115] + _ = x[FnTcpSendAck-116] + _ = x[FnSendSignalThread-117] + _ = x[FnJiffies64-118] + _ = x[FnReadBranchRecords-119] + _ = x[FnGetNsCurrentPidTgid-120] + _ = x[FnXdpOutput-121] + _ = x[FnGetNetnsCookie-122] + _ = x[FnGetCurrentAncestorCgroupId-123] + _ = x[FnSkAssign-124] + _ = x[FnKtimeGetBootNs-125] + _ = x[FnSeqPrintf-126] + _ = x[FnSeqWrite-127] + _ = x[FnSkCgroupId-128] + _ = x[FnSkAncestorCgroupId-129] + _ = x[FnRingbufOutput-130] + _ = x[FnRingbufReserve-131] + _ = x[FnRingbufSubmit-132] + _ = x[FnRingbufDiscard-133] + _ = x[FnRingbufQuery-134] + _ = x[FnCsumLevel-135] + _ = x[FnSkcToTcp6Sock-136] + _ = x[FnSkcToTcpSock-137] + _ = x[FnSkcToTcpTimewaitSock-138] + _ = x[FnSkcToTcpRequestSock-139] + _ = x[FnSkcToUdp6Sock-140] + 
_ = x[FnGetTaskStack-141] + _ = x[FnLoadHdrOpt-142] + _ = x[FnStoreHdrOpt-143] + _ = x[FnReserveHdrOpt-144] + _ = x[FnInodeStorageGet-145] + _ = x[FnInodeStorageDelete-146] + _ = x[FnDPath-147] + _ = x[FnCopyFromUser-148] + _ = x[FnSnprintfBtf-149] + _ = x[FnSeqPrintfBtf-150] + _ = x[FnSkbCgroupClassid-151] + _ = x[FnRedirectNeigh-152] + _ = x[FnPerCpuPtr-153] + _ = x[FnThisCpuPtr-154] + _ = x[FnRedirectPeer-155] + _ = x[FnTaskStorageGet-156] + _ = x[FnTaskStorageDelete-157] + _ = x[FnGetCurrentTaskBtf-158] + _ = x[FnBprmOptsSet-159] + _ = x[FnKtimeGetCoarseNs-160] + _ = x[FnImaInodeHash-161] + _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[FnTcpRawGenSyncookieIpv4-204] + _ = x[FnTcpRawGenSyncookieIpv6-205] + _ = x[FnTcpRawCheckSyncookieIpv4-206] + _ = x[FnTcpRawCheckSyncookieIpv6-207] + _ = x[FnKtimeGetTaiNs-208] + _ = x[FnUserRingbufDrain-209] + _ = 
x[FnCgrpStorageGet-210] + _ = x[FnCgrpStorageDelete-211] + _ = x[maxBuiltinFunc-212] +} + +const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPr
intfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc" + +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 
1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179} + +func (i BuiltinFunc) String() string { + if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 0000000000..67cd39d6f6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,954 @@ +package asm + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// RawInstructionOffset is an offset in units of raw BPF instructions. +type RawInstructionOffset uint64 + +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + +// Bytes returns the offset of an instruction in bytes. +func (rio RawInstructionOffset) Bytes() uint64 { + return uint64(rio) * InstructionSize +} + +// Instruction is a single eBPF instruction. 
+type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata +} + +// Unmarshal decodes a BPF instruction. +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { + return 0, err + } + + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) + } + + ins.Offset = int16(bo.Uint16(data[2:4])) + + if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } + + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. + ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { + return InstructionSize, nil + } + + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { + // No Wrap, to avoid io.EOF clash + return 0, errors.New("64bit immediate is missing second half") + } + + // Require that all fields other than the value are zero. 
+ if bo.Uint32(data[0:4]) != 0 { + return 0, errors.New("64bit immediate has non-zero fields") + } + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) + + return 2 * InstructionSize, nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.IsDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. + cons = int32(uint32(ins.Constant)) + } + + regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) + if err != nil { + return 0, fmt.Errorf("can't marshal registers: %s", err) + } + + if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + + data := make([]byte, InstructionSize) + data[0] = op + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. 
+ bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. +func (ins *Instruction) RewriteMapPtr(fd int) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { + // Preserve the offset value for direct map loads. + offset := uint64(ins.Constant) & (math.MaxUint32 << 32) + rawFd := uint64(uint32(fd)) + ins.Constant = int64(offset | rawFd) +} + +// MapPtr returns the map fd for this instruction. +// +// The result is undefined if the instruction is not a load from a map, +// see IsLoadFromMap. +// +// Deprecated: use Map() instead. +func (ins *Instruction) MapPtr() int { + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) +} + +// RewriteMapOffset changes the offset of a direct load from a map. 
+// +// Returns an error if the instruction is not a direct load. +func (ins *Instruction) RewriteMapOffset(offset uint32) error { + if !ins.OpCode.IsDWordLoad() { + return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) + } + + if ins.Src != PseudoMapValue { + return errors.New("not a direct load from a map") + } + + fd := uint64(ins.Constant) & math.MaxUint32 + ins.Constant = int64(uint64(offset)<<32 | fd) + return nil +} + +func (ins *Instruction) mapOffset() uint32 { + return uint32(uint64(ins.Constant) >> 32) +} + +// IsLoadFromMap returns true if the instruction loads from a map. +// +// This covers both loading the map pointer and direct map value loads. +func (ins *Instruction) IsLoadFromMap() bool { + return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) +} + +// IsFunctionCall returns true if the instruction calls another BPF function. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsFunctionCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall +} + +// IsKfuncCall returns true if the instruction calls a kfunc. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsKfuncCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall +} + +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. 
+func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + +// IsConstantLoad returns true if the instruction loads a constant of the +// given size. +func (ins *Instruction) IsConstantLoad(size Size) bool { + return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 +} + +// Format implements fmt.Formatter. +func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + if ins.IsLoadFromMap() { + fd := ins.mapFd() + m := ins.Map() + switch ins.Src { + case PseudoMapFD: + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } + + case PseudoMapValue: + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } + } + + goto ref + } + + switch cls := op.Class(); { + case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode, MemSXMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case XAddMode: + fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + } + + case cls.IsALU(): + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == ImmSource: + fmt.Fprintf(f, 
"imm: %d", ins.Constant) + default: + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) + switch jop := op.JumpOp(); jop { + case Call: + switch ins.Src { + case PseudoCall: + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + case PseudoKfuncCall: + // kfunc call + fmt.Fprintf(f, "Kfunc(%d)", ins.Constant) + default: + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + default: + fmt.Fprintf(f, "%v ", op) + } + +ref: + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) + } +} + +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +// WithMetadata sets the given Metadata on the Instruction. e.g. to copy +// Metadata from another Instruction when replacing it. +func (ins Instruction) WithMetadata(meta Metadata) Instruction { + ins.Metadata = meta + return ins +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. 
+func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. +func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. 
ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +// Unmarshal unmarshals an Instructions from a binary instruction stream. +// All instructions in insns are replaced by instructions decoded from r. +func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { + if len(*insns) > 0 { + *insns = nil + } + + var offset uint64 + for { + var ins Instruction + n, err := ins.Unmarshal(r, bo) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("offset %d: %w", offset, err) + } + + *insns = append(*insns, ins) + offset += n + } + + return nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol() +} + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. 
+func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. +// +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol() == "" { + continue + } + + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + offsets[ins.Symbol()] = i + } + + return offsets, nil +} + +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. 
+ continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. +func (insns Instructions) ReferenceOffsets() map[string][]int { + offsets := make(map[string][]int) + + for i, ins := range insns { + if ins.Reference() == "" { + continue + } + + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) + } + + return offsets +} + +// Format implements fmt.Formatter. +// +// You can control indentation of symbols by +// specifying a width. Setting a precision controls the indentation of +// instructions. +// The default character is a tab, which can be overridden by specifying +// the ' ' space flag. +func (insns Instructions) Format(f fmt.State, c rune) { + if c != 's' && c != 'v' { + fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) + return + } + + // Precision is better in this case, because it allows + // specifying 0 padding easily. + padding, ok := f.Precision() + if !ok { + padding = 1 + } + + indent := strings.Repeat("\t", padding) + if f.Flag(' ') { + indent = strings.Repeat(" ", padding) + } + + symPadding, ok := f.Width() + if !ok { + symPadding = padding - 1 + } + if symPadding < 0 { + symPadding = 0 + } + + symIndent := strings.Repeat("\t", symPadding) + if f.Flag(' ') { + symIndent = strings.Repeat(" ", symPadding) + } + + // Guess how many digits we need at most, by assuming that all instructions + // are double wide. 
+ highestOffset := len(insns) * 2 + offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) + + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } + } + fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) + } +} + +// Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. +func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + + for i, ins := range insns { + if _, err := ins.Marshal(w, bo); err != nil { + return fmt.Errorf("instruction %d: %w", i, err) + } + } + return nil +} + +// Tag calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and so can be compared +// to ProgramInfo.Tag to figure out whether a loaded program matches +// certain instructions. +func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + h := sha1.New() + for i, ins := range insns { + if ins.IsLoadFromMap() { + ins.Constant = 0 + } + _, err := ins.Marshal(h, bo) + if err != nil { + return "", fmt.Errorf("instruction %d: %w", i, err) + } + } + return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil +} + +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. 
+// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. + symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. 
+func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + +// Iterate allows iterating a BPF program while keeping track of +// various offsets. +// +// Modifying the instruction slice will lead to undefined behaviour. +func (insns Instructions) Iterate() *InstructionIterator { + return &InstructionIterator{insns: insns} +} + +// InstructionIterator iterates over a BPF program. +type InstructionIterator struct { + insns Instructions + // The instruction in question. + Ins *Instruction + // The index of the instruction in the original instruction slice. + Index int + // The offset of the instruction in raw BPF instructions. This accounts + // for double-wide instructions. + Offset RawInstructionOffset +} + +// Next returns true as long as there are any instructions remaining. +func (iter *InstructionIterator) Next() bool { + if len(iter.insns) == 0 { + return false + } + + if iter.Ins != nil { + iter.Index++ + iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) + } + iter.Ins = &iter.insns[0] + iter.insns = iter.insns[1:] + return true +} + +type bpfRegisters uint8 + +func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { + switch bo { + case binary.LittleEndian: + return bpfRegisters((src << 4) | (dst & 0xF)), nil + case binary.BigEndian: + return bpfRegisters((dst << 4) | (src & 0xF)), nil + default: + return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +// IsUnreferencedSymbol returns true if err was caused by +// an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). 
+func IsUnreferencedSymbol(err error) bool { + return errors.Is(err, ErrUnreferencedSymbol) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go new file mode 100644 index 0000000000..2738d736b2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -0,0 +1,135 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp + +// JumpOp affect control flow. +// +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ +type JumpOp uint8 + +const jumpMask OpCode = 0xf0 + +const ( + // InvalidJumpOp is returned by getters when invoked + // on non branch OpCodes + InvalidJumpOp JumpOp = 0xff + // Ja jumps by offset unconditionally + Ja JumpOp = 0x00 + // JEq jumps by offset if r == imm + JEq JumpOp = 0x10 + // JGT jumps by offset if r > imm + JGT JumpOp = 0x20 + // JGE jumps by offset if r >= imm + JGE JumpOp = 0x30 + // JSet jumps by offset if r & imm + JSet JumpOp = 0x40 + // JNE jumps by offset if r != imm + JNE JumpOp = 0x50 + // JSGT jumps by offset if signed r > signed imm + JSGT JumpOp = 0x60 + // JSGE jumps by offset if signed r >= signed imm + JSGE JumpOp = 0x70 + // Call builtin or user defined function from imm + Call JumpOp = 0x80 + // Exit ends execution, with value in r0 + Exit JumpOp = 0x90 + // JLT jumps by offset if r < imm + JLT JumpOp = 0xa0 + // JLE jumps by offset if r <= imm + JLE JumpOp = 0xb0 + // JSLT jumps by offset if signed r < signed imm + JSLT JumpOp = 0xc0 + // JSLE jumps by offset if signed r <= signed imm + JSLE JumpOp = 0xd0 +) + +// Return emits an exit instruction. +// +// Requires a return value in R0. +func Return() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Exit), + } +} + +// Op returns the OpCode for a given jump source. 
+func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +} + +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Reg32(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call { + return InvalidOpCode + } + + return OpCode(class).SetJumpOp(op).SetSource(source) +} + +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + +// Label adjusts PC to the address of the label. 
+func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 0000000000..85a4aaffa5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 0000000000..cdb5c5cfa4 --- /dev/null +++ 
b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,225 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load from memory + MemMode Mode = 0x60 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 + // XAddMode - add atomically across processors. + XAddMode Mode = 0xc0 +) + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. +func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. 
+func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. +func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapFD, + Constant: int64(uint32(fd)), + } +} + +// LoadMapValue stores a pointer to the value at a certain offset of a map. +func LoadMapValue(dst Register, fd int, offset uint32) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapValue, + Constant: int64(fdAndOffset), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. 
+func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. +func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return OpCode(StXClass).SetMode(XAddMode).SetSize(size) +} + +// StoreXAdd atomically adds src to *dst. 
+func StoreXAdd(dst, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreXAddOp(size), + Dst: dst, + Src: src, + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 0000000000..c48080327c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,84 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidMode-255] + _ = x[ImmMode-0] + _ = x[AbsMode-32] + _ = x[IndMode-64] + _ = x[MemMode-96] + _ = x[MemSXMode-128] + _ = x[XAddMode-192] +} + +const ( + _Mode_name_0 = "ImmMode" + _Mode_name_1 = "AbsMode" + _Mode_name_2 = "IndMode" + _Mode_name_3 = "MemMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "XAddMode" + _Mode_name_6 = "InvalidMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 32: + return _Mode_name_1 + case i == 64: + return _Mode_name_2 + case i == 96: + return _Mode_name_3 + case i == 128: + return _Mode_name_4 + case i == 192: + return _Mode_name_5 + case i == 255: + return _Mode_name_6 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidSize-255] + _ = x[DWord-24] + _ = x[Word-0] + _ = x[Half-8] + _ = x[Byte-16] +} + +const ( + _Size_name_0 = "Word" + _Size_name_1 = "Half" + _Size_name_2 = "Byte" + _Size_name_3 = "DWord" + _Size_name_4 = "InvalidSize" +) + +func (i Size) String() string { + switch { + case i == 0: + return _Size_name_0 + case i == 8: + return _Size_name_1 + case i == 16: + return _Size_name_2 + case i == 24: + return _Size_name_3 + case i == 255: + return _Size_name_4 + default: + return "Size(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 0000000000..dd368a9360 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. +type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. 
+func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. +func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go new file mode 100644 index 0000000000..1dfd0b171a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -0,0 +1,303 @@ +package asm + +import ( + "fmt" + "strings" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class + +// Class of operations +// +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ +type Class uint8 + +const classMask OpCode = 0x07 + +const ( + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. + LdClass Class = 0x00 + // LdXClass loads memory into registers. + LdXClass Class = 0x01 + // StClass stores immediate values to memory. + StClass Class = 0x02 + // StXClass stores registers to memory. + StXClass Class = 0x03 + // ALUClass describes arithmetic operators. + ALUClass Class = 0x04 + // JumpClass describes jump operators. + JumpClass Class = 0x05 + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. + ALU64Class Class = 0x07 +) + +// IsLoad checks if this is either LdClass or LdXClass. 
+func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. +func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. +func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() +} + +// OpCode represents a single operation. +// It is not a 1:1 mapping to real eBPF opcodes. +// +// The encoding varies based on a 3-bit Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS +// +// For ALUClass and ALUCLass32: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// OPC |S| CLS +// +// For LdClass, LdXclass, StClass and StXClass: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For JumpClass, Jump32Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint16 + +// InvalidOpCode is returned by setters on OpCode +const InvalidOpCode OpCode = 0xffff + +// bpfOpCode returns the actual BPF opcode. +func (op OpCode) bpfOpCode() (byte, error) { + const opCodeMask = 0xff + + if !valid(op, opCodeMask) { + return 0, fmt.Errorf("invalid opcode %x", op) + } + + return byte(op & opCodeMask), nil +} + +// rawInstructions returns the number of BPF instructions required +// to encode this opcode. +func (op OpCode) rawInstructions() int { + if op.IsDWordLoad() { + return 2 + } + return 1 +} + +func (op OpCode) IsDWordLoad() bool { + return op == LoadImmOp(DWord) +} + +// Class returns the class of operation. +func (op OpCode) Class() Class { + return Class(op & classMask) +} + +// Mode returns the mode for load and store operations. 
+func (op OpCode) Mode() Mode { + if !op.Class().isLoadOrStore() { + return InvalidMode + } + return Mode(op & modeMask) +} + +// Size returns the size for load and store operations. +func (op OpCode) Size() Size { + if !op.Class().isLoadOrStore() { + return InvalidSize + } + return Size(op & sizeMask) +} + +// Source returns the source for branch and ALU operations. +func (op OpCode) Source() Source { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { + return InvalidSource + } + return Source(op & sourceMask) +} + +// ALUOp returns the ALUOp. +func (op OpCode) ALUOp() ALUOp { + if !op.Class().IsALU() { + return InvalidALUOp + } + return ALUOp(op & aluMask) +} + +// Endianness returns the Endianness for a byte swap instruction. +func (op OpCode) Endianness() Endianness { + if op.ALUOp() != Swap { + return InvalidEndian + } + return Endianness(op & endianMask) +} + +// JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. +func (op OpCode) JumpOp() JumpOp { + if !op.Class().IsJump() { + return InvalidJumpOp + } + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) { + return InvalidJumpOp + } + + return jumpOp +} + +// SetMode sets the mode on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetMode(mode Mode) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { + return InvalidOpCode + } + return (op & ^modeMask) | OpCode(mode) +} + +// SetSize sets the size on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSize(size Size) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { + return InvalidOpCode + } + return (op & ^sizeMask) | OpCode(size) +} + +// SetSource sets the source on jump and ALU operations. 
+// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSource(source Source) OpCode { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { + return InvalidOpCode + } + return (op & ^sourceMask) | OpCode(source) +} + +// SetALUOp sets the ALUOp on ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetALUOp(alu ALUOp) OpCode { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { + return InvalidOpCode + } + return (op & ^aluMask) | OpCode(alu) +} + +// SetJumpOp sets the JumpOp on jump operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetJumpOp(jump JumpOp) OpCode { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. + if newOp.JumpOp() == InvalidJumpOp { + return InvalidOpCode + } + + return newOp +} + +func (op OpCode) String() string { + var f strings.Builder + + switch class := op.Class(); { + case class.isLoadOrStore(): + f.WriteString(strings.TrimSuffix(class.String(), "Class")) + + mode := op.Mode() + f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + + switch op.Size() { + case DWord: + f.WriteString("DW") + case Word: + f.WriteString("W") + case Half: + f.WriteString("H") + case Byte: + f.WriteString("B") + } + + case class.IsALU(): + if op.ALUOp() == Swap && op.Class() == ALU64Class { + // B to make BSwap, uncontitional byte swap + f.WriteString("B") + } + + f.WriteString(op.ALUOp().String()) + + if op.ALUOp() == Swap { + if op.Class() == ALUClass { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } + } else { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + + if class == ALUClass { + f.WriteString("32") + } + } + + case class.IsJump(): + f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + + if jop := 
op.JumpOp(); jop != Exit && jop != Call && jop != Ja { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + default: + fmt.Fprintf(&f, "OpCode(%#x)", uint8(op)) + } + + return f.String() +} + +// valid returns true if all bits in value are covered by mask. +func valid(value, mask OpCode) bool { + return value & ^mask == 0 +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go new file mode 100644 index 0000000000..58bc3e7e7f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[Jump32Class-6] + _ = x[ALU64Class-7] +} + +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" + +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} + +func (i Class) String() string { + if i >= Class(len(_Class_index)-1) { + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Class_name[_Class_index[i]:_Class_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 0000000000..457a3b8a88 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,51 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. 
+const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. +const ( + R10 Register = R9 + 1 + RFP = R10 +) + +// Pseudo registers used by 64bit loads and jumps +const ( + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL +) + +func (r Register) String() string { + v := uint8(r) + if v == 10 { + return "rfp" + } + return fmt.Sprintf("r%d", v) +} diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 0000000000..bece896bb6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,79 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] + _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachLSMCgroup-43] + _ = x[AttachStructOps-44] + _ = x[AttachNetfilter-45] + _ = x[AttachTCXIngress-46] + _ = x[AttachTCXEgress-47] + _ = x[AttachTraceUprobeMulti-48] + _ = x[AttachCgroupUnixConnect-49] + _ = x[AttachCgroupUnixSendmsg-50] + _ = x[AttachCgroupUnixRecvmsg-51] + _ = x[AttachCgroupUnixGetpeername-52] + _ = x[AttachCgroupUnixGetsockname-53] + _ = x[AttachNetkitPrimary-54] + _ = x[AttachNetkitPeer-55] +} + +const _AttachType_name = 
"NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 0000000000..671f680b2a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,699 @@ +package btf + +import ( + "bufio" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. 
+var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") + ErrMultipleMatches = errors.New("multiple matching types") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +// immutableTypes is a set of types which musn't be changed. +type immutableTypes struct { + // All types contained by the spec, not including types from the base in + // case the spec was parsed from split BTF. + types []Type + + // Type IDs indexed by type. + typeIDs map[Type]TypeID + + // The ID of the first type in types. + firstTypeID TypeID + + // Types indexed by essential name. + // Includes all struct flavors and types with the same name. + namedTypes map[essentialName][]TypeID + + // Byte order of the types. This affects things like struct member order + // when using bitfields. + byteOrder binary.ByteOrder +} + +func (s *immutableTypes) typeByID(id TypeID) (Type, bool) { + if id < s.firstTypeID { + return nil, false + } + + index := int(id - s.firstTypeID) + if index >= len(s.types) { + return nil, false + } + + return s.types[index], true +} + +// mutableTypes is a set of types which may be changed. +type mutableTypes struct { + imm immutableTypes + mu sync.RWMutex // protects copies below + copies map[Type]Type // map[orig]copy + copiedTypeIDs map[Type]TypeID // map[copy]origID +} + +// add a type to the set of mutable types. +// +// Copies type and all of its children once. Repeated calls with the same type +// do not copy again. +func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type { + mt.mu.RLock() + cpy, ok := mt.copies[typ] + mt.mu.RUnlock() + + if ok { + // Fast path: the type has been copied before. + return cpy + } + + // modifyGraphPreorder copies the type graph node by node, so we can't drop + // the lock in between. 
+ mt.mu.Lock() + defer mt.mu.Unlock() + + return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs) +} + +// copy a set of mutable types. +func (mt *mutableTypes) copy() *mutableTypes { + if mt == nil { + return nil + } + + mtCopy := &mutableTypes{ + mt.imm, + sync.RWMutex{}, + make(map[Type]Type, len(mt.copies)), + make(map[Type]TypeID, len(mt.copiedTypeIDs)), + } + + // Prevent concurrent modification of mt.copiedTypeIDs. + mt.mu.RLock() + defer mt.mu.RUnlock() + + copiesOfCopies := make(map[Type]Type, len(mt.copies)) + for orig, copy := range mt.copies { + // NB: We make a copy of copy, not orig, so that changes to mutable types + // are preserved. + copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs) + mtCopy.copies[orig] = copyOfCopy + } + + return mtCopy +} + +func (mt *mutableTypes) typeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + mt.mu.RLock() + defer mt.mu.RUnlock() + + id, ok := mt.copiedTypeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) { + immT, ok := mt.imm.typeByID(id) + if !ok { + return nil, false + } + + return mt.add(immT, mt.imm.typeIDs), true +} + +func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) { + immTypes := mt.imm.namedTypes[newEssentialName(name)] + if len(immTypes) == 0 { + return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) + } + + // Return a copy to prevent changes to namedTypes. + result := make([]Type, 0, len(immTypes)) + for _, id := range immTypes { + immT, ok := mt.imm.typeByID(id) + if !ok { + return nil, fmt.Errorf("no type with ID %d", id) + } + + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. 
+ if immT.TypeName() == name { + result = append(result, mt.add(immT, mt.imm.typeIDs)) + } + } + return result, nil +} + +// Spec allows querying a set of Types and loading the set into the +// kernel. +type Spec struct { + *mutableTypes + + // String table from ELF. + strings *stringTable +} + +// LoadSpec opens file and calls LoadSpecFromReader on it. +func LoadSpec(file string) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSpecFromReader(fh) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. +// +// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos +// may be nil. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + if bo := guessRawBTFByteOrder(rd); bo != nil { + return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil) + } + + return nil, err + } + + return loadSpecFromELF(file) +} + +// LoadSpecAndExtInfosFromReader reads from an ELF. +// +// ExtInfos may be nil if the ELF doesn't contain section metadata. +// Returns ErrNotFound if the ELF contains no BTF. +func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, nil, err + } + + spec, err := loadSpecFromELF(file) + if err != nil { + return nil, nil, err + } + + extInfos, err := loadExtInfosFromELF(file, spec) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + return spec, extInfos, nil +} + +// symbolOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. 
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + offsets := make(map[symbol]uint32) + for _, sym := range symbols { + if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if sym.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. + continue + } + + if int(sym.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section) + } + + secName := file.Sections[sym.Section].Name + offsets[symbol{secName, sym.Name}] = uint32(sym.Value) + } + + return offsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + offsets, err := symbolOffsets(file) + if err != nil { + return nil, err + } + + if btfSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed BTF is not supported") + } + + spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil) + if err != nil { + return nil, err + } + + err = fixupDatasec(spec.imm.types, sectionSizes, offsets) + if err != nil { + return nil, err + } + + return spec, nil +} + +func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) { + var ( + baseStrings *stringTable + firstTypeID TypeID + err error + ) + + if base != nil { + if base.imm.firstTypeID != 0 { + return nil, 
fmt.Errorf("can't use split BTF as base") + } + + baseStrings = base.strings + + firstTypeID, err = base.nextTypeID() + if err != nil { + return nil, err + } + } + + types, rawStrings, err := parseBTF(btf, bo, baseStrings, base) + if err != nil { + return nil, err + } + + typeIDs, typesByName := indexTypes(types, firstTypeID) + + return &Spec{ + &mutableTypes{ + immutableTypes{ + types, + typeIDs, + firstTypeID, + typesByName, + bo, + }, + sync.RWMutex{}, + make(map[Type]Type), + make(map[Type]TypeID), + }, + rawStrings, + }, nil +} + +func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) { + namedTypes := 0 + for _, typ := range types { + if typ.TypeName() != "" { + // Do a pre-pass to figure out how big types by name has to be. + // Most types have unique names, so it's OK to ignore essentialName + // here. + namedTypes++ + } + } + + typeIDs := make(map[Type]TypeID, len(types)) + typesByName := make(map[essentialName][]TypeID, namedTypes) + + for i, typ := range types { + id := firstTypeID + TypeID(i) + typeIDs[typ] = id + + if name := newEssentialName(typ.TypeName()); name != "" { + typesByName[name] = append(typesByName[name], id) + } + } + + return typeIDs, typesByName +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + buf := new(bufio.Reader) + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) + if _, err := parseBTFHeader(buf, bo); err == nil { + return bo + } + } + + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) { + buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) + header, err := parseBTFHeader(buf, bo) + if err != nil { + return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), + baseStrings) + if err != nil { + return nil, nil, fmt.Errorf("can't read type names: %w", err) + } + + buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) + types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base) + if err != nil { + return nil, nil, err + } + + return types, rawStrings, nil +} + +type symbol struct { + section string + name string +} + +// fixupDatasec attempts to patch up missing info in Datasecs and its members by +// supplementing them with information from the ELF headers and symbol table. +func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error { + for _, typ := range types { + ds, ok := typ.(*Datasec) + if !ok { + continue + } + + name := ds.Name + + // Some Datasecs are virtual and don't have corresponding ELF sections. + switch name { + case ".ksyms": + // .ksyms describes forward declarations of kfunc signatures. + // Nothing to fix up, all sizes and offsets are 0. + for _, vsi := range ds.Vars { + _, ok := vsi.Type.(*Func) + if !ok { + // Only Funcs are supported in the .ksyms Datasec. + return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported) + } + } + + continue + case ".kconfig": + // .kconfig has a size of 0 and has all members' offsets set to 0. + // Fix up all offsets and set the Datasec's size. + if err := fixupDatasecLayout(ds); err != nil { + return err + } + + // Fix up extern to global linkage to avoid a BTF verifier error. 
+ for _, vsi := range ds.Vars { + vsi.Type.(*Var).Linkage = GlobalVar + } + + continue + } + + if ds.Size != 0 { + continue + } + + ds.Size, ok = sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + for i := range ds.Vars { + symName := ds.Vars[i].Type.TypeName() + ds.Vars[i].Offset, ok = offsets[symbol{name, symName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName) + } + } + } + + return nil +} + +// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and +// alignment. Calculate and set ds.Size. +func fixupDatasecLayout(ds *Datasec) error { + var off uint32 + + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type) + } + + size, err := Sizeof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting size: %w", v.Name, err) + } + align, err := alignof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err) + } + + // Align the current member based on the offset of the end of the previous + // member and the alignment of the current member. + off = internal.Align(off, uint32(align)) + + ds.Vars[i].Offset = off + + off += uint32(size) + } + + ds.Size = off + + return nil +} + +// Copy creates a copy of Spec. +func (s *Spec) Copy() *Spec { + if s == nil { + return nil + } + + return &Spec{ + s.mutableTypes.copy(), + s.strings, + } +} + +type sliceWriter []byte + +func (sw sliceWriter) Write(p []byte) (int, error) { + if len(p) != len(sw) { + return 0, errors.New("size doesn't match") + } + + return copy(sw, p), nil +} + +// nextTypeID returns the next unallocated type ID or an error if there are no +// more type IDs. 
+func (s *Spec) nextTypeID() (TypeID, error) { + id := s.imm.firstTypeID + TypeID(len(s.imm.types)) + if id < s.imm.firstTypeID { + return 0, fmt.Errorf("no more type IDs") + } + return id, nil +} + +// TypeByID returns the BTF Type with the given type ID. +// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. +func (s *Spec) TypeByID(id TypeID) (Type, error) { + typ, ok := s.typeByID(id) + if !ok { + return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound) + } + + return typ, nil +} + +// TypeID returns the ID for a given Type. +// +// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec. +func (s *Spec) TypeID(typ Type) (TypeID, error) { + return s.mutableTypes.typeID(typ) +} + +// AnyTypesByName returns a list of BTF Types with the given name. +// +// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + return s.mutableTypes.anyTypesByName(name) +} + +// AnyTypeByName returns a Type with the given name. +// +// Returns an error if multiple types of that name exist. +func (s *Spec) AnyTypeByName(name string) (Type, error) { + types, err := s.AnyTypesByName(name) + if err != nil { + return nil, err + } + + if len(types) > 1 { + return nil, fmt.Errorf("found multiple types: %v", types) + } + + return types[0], nil +} + +// TypeByName searches for a Type with a specific name. Since multiple Types +// with the same name can exist, the parameter typ is taken to narrow down the +// search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. On success, the +// address of the found Type will be copied to typ. 
+// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +// Returns an error wrapping ErrMultipleTypes if multiple candidates are found. +func (s *Spec) TypeByName(name string, typ interface{}) error { + typeInterface := reflect.TypeOf((*Type)(nil)).Elem() + + // typ may be **T or *Type + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if wanted == typeInterface { + // This is *Type. Unwrap the value's type. + wanted = typPtr.Elem().Type() + } + + if !wanted.AssignableTo(typeInterface) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + return loadRawSpec(r, internal.NativeEndian, base) +} + +// TypesIterator iterates over types of a given spec. +type TypesIterator struct { + spec *Spec + id TypeID + done bool + // The last visited type in the spec. + Type Type +} + +// Iterate returns the types iterator. +func (s *Spec) Iterate() *TypesIterator { + return &TypesIterator{spec: s, id: s.imm.firstTypeID} +} + +// Next returns true as long as there are any remaining types. 
+func (iter *TypesIterator) Next() bool { + if iter.done { + return false + } + + var ok bool + iter.Type, ok = iter.spec.typeByID(iter.id) + iter.id++ + iter.done = !ok + return !iter.done +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go new file mode 100644 index 0000000000..f0e327abc0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -0,0 +1,519 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. +const ( + kindUnknown btfKind = iota // Unknown + kindInt // Int + kindPointer // Pointer + kindArray // Array + kindStruct // Struct + kindUnion // Union + kindEnum // Enum + kindForward // Forward + kindTypedef // Typedef + kindVolatile // Volatile + kindConst // Const + kindRestrict // Restrict + // Added ~4.20 + kindFunc // Func + kindFuncProto // FuncProto + // Added ~5.1 + kindVar // Var + kindDatasec // Datasec + // Added ~5.13 + kindFloat // Float + // Added 5.16 + kindDeclTag // DeclTag + kindTypeTag // TypeTag + // Added 6.0 + kindEnum64 // Enum64 +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. 
+type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +var btfHeaderLen = binary.Size(&btfHeader{}) + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// typeStart returns the offset from the beginning of the .BTF section +// to the start of its type entries. +func (h *btfHeader) typeStart() int64 { + return int64(h.HdrLen + h.TypeOff) +} + +// stringStart returns the offset from the beginning of the .BTF section +// to the start of its string table. +func (h *btfHeader) stringStart() int64 { + return int64(h.HdrLen + h.StringOff) +} + +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { + var header btfHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, errors.New("header length shorter than btfHeader size") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) + } + + return &header, nil +} + +var btfTypeLen = binary.Size(btfType{}) + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. 
# of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. + */ + SizeType uint32 +} + +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return readBits(bt.Info, len, shift) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info = writeBits(bt.Info, len, shift, value) +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) kindFlagBool() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) setKindFlagBool(set bool) { + var value uint32 + if set { + value = 1 + } + bt.setInfo(value, 
btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +// Bitfield returns true if the struct or union contain a bitfield. +func (bt *btfType) Bitfield() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetBitfield(isBitfield bool) { + bt.setKindFlagBool(isBitfield) +} + +func (bt *btfType) FwdKind() FwdKind { + return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift)) +} + +func (bt *btfType) SetFwdKind(kind FwdKind) { + bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +func (bt *btfType) Signed() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetSigned(signed bool) { + bt.setKindFlagBool(signed) +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) SetType(id TypeID) { + bt.SizeType = uint32(id) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? + return bt.SizeType +} + +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + +func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error { + buf := make([]byte, unsafe.Sizeof(*bt)) + bo.PutUint32(buf[0:], bt.NameOff) + bo.PutUint32(buf[4:], bt.Info) + bo.PutUint32(buf[8:], bt.SizeType) + _, err := w.Write(buf) + return err +} + +type rawType struct { + btfType + data interface{} +} + +func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := rt.btfType.Marshal(w, bo); err != nil { + return err + } + + if rt.data == nil { + return nil + } + + return binary.Write(w, bo, rt.data) +} + +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range members { + if off+btfMemberLen > len(b) { + return 0, fmt.Errorf("not 
enough bytes to unmarshal btfMember %d", i) + } + + members[i].NameOff = bo.Uint32(b[off+0:]) + members[i].Type = TypeID(bo.Uint32(b[off+4:])) + members[i].Offset = bo.Uint32(b[off+8:]) + + off += btfMemberLen + } + + return off, nil +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range secinfos { + if off+btfVarSecinfoLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i) + } + + secinfos[i].Type = TypeID(bo.Uint32(b[off+0:])) + secinfos[i].Offset = bo.Uint32(b[off+4:]) + secinfos[i].Size = bo.Uint32(b[off+8:]) + + off += btfVarSecinfoLen + } + + return off, nil +} + +type btfVariable struct { + Linkage uint32 +} + +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + +type btfEnum struct { + NameOff uint32 + Val uint32 +} + +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].Val = bo.Uint32(b[off+4:]) + + off += btfEnumLen + } + + return off, nil +} + +type btfEnum64 struct { + NameOff uint32 + ValLo32 uint32 + ValHi32 uint32 +} + +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnum64Len > len(b) { + return 0, fmt.Errorf("not enough 
bytes to unmarshal btfEnum64 %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].ValLo32 = bo.Uint32(b[off+4:]) + enums[i].ValHi32 = bo.Uint32(b[off+8:]) + + off += btfEnum64Len + } + + return off, nil +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +var btfParamLen = int(unsafe.Sizeof(btfParam{})) + +func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range params { + if off+btfParamLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i) + } + + params[i].NameOff = bo.Uint32(b[off+0:]) + params[i].Type = TypeID(bo.Uint32(b[off+4:])) + + off += btfParamLen + } + + return off, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} + +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") + } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go new file mode 100644 index 0000000000..b7a1b80d15 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[kindUnknown-0] + _ = x[kindInt-1] + _ = x[kindPointer-2] + _ = x[kindArray-3] + _ = x[kindStruct-4] + _ = x[kindUnion-5] + _ = x[kindEnum-6] + _ = x[kindForward-7] + _ = x[kindTypedef-8] + _ = x[kindVolatile-9] + _ = x[kindConst-10] + _ = x[kindRestrict-11] + _ = x[kindFunc-12] + _ = x[kindFuncProto-13] + _ = x[kindVar-14] + _ = x[kindDatasec-15] + _ = x[kindFloat-16] + _ = x[kindDeclTag-17] + _ = x[kindTypeTag-18] + _ = x[kindEnum64-19] +} + +const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64" + +var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120} + +func (i btfKind) String() string { + if i >= btfKind(len(_btfKind_index)-1) { + return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go new file mode 100644 index 0000000000..ee89f98331 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/core.go @@ -0,0 +1,1261 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "slices" + "strconv" + "strings" + + "github.com/cilium/ebpf/asm" +) + +// Code in this file is derived from libbpf, which is available under a BSD +// 2-Clause license. + +// A constant used when CO-RE relocation has to remove instructions. +// +// Taken from libbpf. +const COREBadRelocationSentinel = 0xbad2310 + +// COREFixup is the result of computing a CO-RE relocation for a target. +type COREFixup struct { + kind coreKind + local uint64 + target uint64 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. 
+ skipLocalValidation bool +} + +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target +} + +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) + } + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) +} + +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if f.poison { + // Relocation is poisoned, replace the instruction with an invalid one. + if ins.OpCode.IsDWordLoad() { + // Replace a dword load with a invalid dword load to preserve instruction size. + *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord) + } else { + // Replace all single size instruction with a invalid call instruction. + *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call() + } + + // Add context to the kernel verifier output. + if source := ins.Source(); source != nil { + *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source))) + } else { + *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE")) + } + + return nil + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) + } + + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) + } + + ins.Offset = int16(f.target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) + } + + ins.Constant = int64(f.target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != 
asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) + } + + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) + } + + ins.Constant = int64(f.target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.kind.checksForExistence() && f.target == 0 +} + +// coreKind is the type of CO-RE relocation as specified in BPF source code. +type coreKind uint32 + +const ( + reloFieldByteOffset coreKind = iota /* field byte offset */ + reloFieldByteSize /* field size in bytes */ + reloFieldExists /* field existence in target kernel */ + reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ + reloFieldLShiftU64 /* bitfield-specific left bitshift */ + reloFieldRShiftU64 /* bitfield-specific right bitshift */ + reloTypeIDLocal /* type ID in local BPF object */ + reloTypeIDTarget /* type ID in target kernel */ + reloTypeExists /* type existence in target kernel */ + reloTypeSize /* type size in bytes */ + reloEnumvalExists /* enum value existence in target kernel */ + reloEnumvalValue /* enum value integer value */ + reloTypeMatches /* type matches kernel type */ +) + +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches +} + +func (k coreKind) String() string { + switch k { + case reloFieldByteOffset: + return "byte_off" + case reloFieldByteSize: + return "byte_sz" + case reloFieldExists: + return "field_exists" + case reloFieldSigned: + return "signed" + case reloFieldLShiftU64: + return "lshift_u64" + case reloFieldRShiftU64: + return "rshift_u64" + case reloTypeIDLocal: + return "local_type_id" + case reloTypeIDTarget: + return 
"target_type_id" + case reloTypeExists: + return "type_exists" + case reloTypeSize: + return "type_size" + case reloEnumvalExists: + return "enumval_exists" + case reloEnumvalValue: + return "enumval_value" + case reloTypeMatches: + return "type_matches" + default: + return fmt.Sprintf("unknown (%d)", k) + } +} + +// CORERelocate calculates changes needed to adjust eBPF instructions for differences +// in types. +// +// targets forms the set of types to relocate against. The first element has to be +// BTF for vmlinux, the following must be types for kernel modules. +// +// resolveLocalTypeID is called for each local type which requires a stable TypeID. +// Calling the function with the same type multiple times must produce the same +// result. It is the callers responsibility to ensure that the relocated instructions +// are loaded with matching BTF. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + if len(targets) == 0 { + // Explicitly check for nil here since the argument used to be optional. + return nil, fmt.Errorf("targets must be provided") + } + + // We can't encode type IDs that aren't for vmlinux into instructions at the + // moment. + resolveTargetTypeID := targets[0].TypeID + + for _, target := range targets { + if bo != target.imm.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder) + } + } + + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. 
+ relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind == reloTypeIDLocal { + // Filtering out reloTypeIDLocal here makes our lives a lot easier + // down the line, since it doesn't have a target at all. + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + id, err := resolveLocalTypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint64(relo.id), + target: uint64(id), + } + continue + } + + group, ok := relosByType[relo.typ] + if !ok { + group = &reloGroup{} + relosByType[relo.typ] = group + } + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) + } + + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + essentialName := newEssentialName(localTypeName) + + var targetTypes []Type + for _, target := range targets { + namedTypeIDs := target.imm.namedTypes[essentialName] + targetTypes = slices.Grow(targetTypes, len(namedTypeIDs)) + for _, id := range namedTypeIDs { + typ, err := target.TypeByID(id) + if err != nil { + return nil, err + } + + targetTypes = append(targetTypes, typ) + } + } + + fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for j, index := range group.indices { + result[index] = fixups[j] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") +var errIncompatibleTypes = errors.New("incompatible types") + +// coreCalculateFixups finds the target type that best 
matches all relocations. +// +// All relos must target the same type. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. +func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + bestScore := len(relos) + var bestFixups []COREFixup + for _, target := range targets { + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err) + } + if fixup.poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. + // + // Poison everything except checksForExistence. + bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } + } + } + + return bestFixups, nil +} + +var errNoSignedness = errors.New("no signedness") + +// coreCalculateFixup calculates the fixup given a relocation and a target type. 
+func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) { + fixup := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil + } + poison := func() (COREFixup, error) { + if relo.kind.checksForExistence() { + return fixup(1, 0) + } + return COREFixup{kind: relo.kind, poison: true}, nil + } + zero := COREFixup{} + + local := relo.typ + + switch relo.kind { + case reloTypeMatches: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := coreTypesMatch(local, target, nil) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + return fixup(1, 1) + + case reloTypeIDTarget, reloTypeSize, reloTypeExists: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := CheckTypeCompatibility(local, target) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloTypeExists: + return fixup(1, 1) + + case reloTypeIDTarget: + targetID, err := resolveTargetTypeID(target) + if errors.Is(err, ErrNotFound) { + // Probably a relocation trying to get the ID + // of a type from a kmod. 
+ return poison() + } + if err != nil { + return zero, err + } + return fixup(uint64(relo.id), uint64(targetID)) + + case reloTypeSize: + localSize, err := Sizeof(local) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(target) + if err != nil { + return zero, err + } + + return fixup(uint64(localSize), uint64(targetSize)) + } + + case reloEnumvalValue, reloEnumvalExists: + localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloEnumvalExists: + return fixup(1, 1) + + case reloEnumvalValue: + return fixup(localValue.Value, targetValue.Value) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned: + if _, ok := As[*Fwd](target); ok { + // We can't relocate fields using a forward declaration, so + // skip it. If a non-forward declaration is present in the BTF + // we'll find it in one of the other iterations. 
+ return poison() + } + + localField, targetField, err := coreFindField(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { + f.skipLocalValidation = localField.bitfieldSize > 0 + return f, err + } + + switch relo.kind { + case reloFieldExists: + return fixup(1, 1) + + case reloFieldByteOffset: + return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset))) + + case reloFieldByteSize: + localSize, err := Sizeof(localField.Type) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize))) + + case reloFieldLShiftU64: + var target uint64 + if bo == binary.LittleEndian { + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + target = uint64(64 - targetField.bitfieldOffset - targetSize) + } else { + loadWidth, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + + target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) + } + return fixupWithoutValidation(0, target) + + case reloFieldRShiftU64: + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + return fixupWithoutValidation(0, uint64(64-targetSize)) + + case reloFieldSigned: + switch local := UnderlyingType(localField.Type).(type) { + case *Enum: + target, ok := As[*Enum](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type) + } + + return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed)) + case *Int: + target, ok := As[*Int](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type) + } + + return fixup( + uint64(local.Encoding&Signed), + uint64(target.Encoding&Signed), + ) + 
default: + return zero, fmt.Errorf("type %T: %w", local, errNoSignedness) + } + } + } + + return zero, ErrNotSupported +} + +func boolToUint64(val bool) uint64 { + if val { + return 1 + } + return 0 +} + +/* coreAccessor contains a path through a struct. It contains at least one index. + * + * The interpretation depends on the kind of the relocation. The following is + * taken from struct bpf_core_relo in libbpf_internal.h: + * + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * + * Example to provide a better feel. + * + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample s = ...; + * int x = &s->a; // encoded as "0:0" (a is field #0) + * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + */ +type coreAccessor []int + +func parseCOREAccessor(accessor string) (coreAccessor, error) { + if accessor == "" { + return nil, fmt.Errorf("empty accessor") + } + + parts := strings.Split(accessor, ":") + result := make(coreAccessor, 0, len(parts)) + for _, part := range parts { + // 31 bits to avoid overflowing int on 32 bit platforms. 
+ index, err := strconv.ParseUint(part, 10, 31) + if err != nil { + return nil, fmt.Errorf("accessor index %q: %s", part, err) + } + + result = append(result, int(index)) + } + + return result, nil +} + +func (ca coreAccessor) String() string { + strs := make([]string, 0, len(ca)) + for _, i := range ca { + strs = append(strs, strconv.Itoa(i)) + } + return strings.Join(strs, ":") +} + +func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { + e, ok := As[*Enum](t) + if !ok { + return nil, fmt.Errorf("not an enum: %s", t) + } + + if len(ca) > 1 { + return nil, fmt.Errorf("invalid accessor %s for enum", ca) + } + + i := ca[0] + if i >= len(e.Values) { + return nil, fmt.Errorf("invalid index %d for %s", i, e) + } + + return &e.Values[i], nil +} + +// coreField represents the position of a "child" of a composite type from the +// start of that type. +// +// /- start of composite +// | offset * 8 | bitfieldOffset | bitfieldSize | ... | +// \- start of field end of field -/ +type coreField struct { + Type Type + + // The position of the field from the start of the composite type in bytes. + offset uint32 + + // The offset of the bitfield in bits from the start of the field. + bitfieldOffset Bits + + // The size of the bitfield in bits. + // + // Zero if the field is not a bitfield. + bitfieldSize Bits +} + +func (cf *coreField) adjustOffsetToNthElement(n int) error { + if n == 0 { + return nil + } + + size, err := Sizeof(cf.Type) + if err != nil { + return err + } + + cf.offset += uint32(n) * uint32(size) + return nil +} + +func (cf *coreField) adjustOffsetBits(offset Bits) error { + align, err := alignof(cf.Type) + if err != nil { + return err + } + + // We can compute the load offset by: + // 1) converting the bit offset to bytes with a flooring division. + // 2) dividing and multiplying that offset by the alignment, yielding the + // load size aligned offset. 
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) + + // The number of bits remaining is the bit offset less the number of bits + // we can "skip" with the aligned offset. + cf.bitfieldOffset = offset - Bits(offsetBytes*8) + + // We know that cf.offset is aligned at to at least align since we get it + // from the compiler via BTF. Adding an aligned offsetBytes preserves the + // alignment. + cf.offset += offsetBytes + return nil +} + +func (cf *coreField) sizeBits() (Bits, error) { + if cf.bitfieldSize > 0 { + return cf.bitfieldSize, nil + } + + // Someone is trying to access a non-bitfield via a bit shift relocation. + // This happens when a field changes from a bitfield to a regular field + // between kernel versions. Synthesise the size to make the shifts work. + size, err := Sizeof(cf.Type) + if err != nil { + return 0, err + } + return Bits(size * 8), nil +} + +// coreFindField descends into the local type using the accessor and tries to +// find an equivalent field in target at each step. +// +// Returns the field and the offset of the field from the start of +// target in bits. +func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { + local := coreField{Type: localT} + target := coreField{Type: targetT} + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) + } + + // The first index is used to offset a pointer of the base type like + // when accessing an array. 
+ if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + var localMaybeFlex, targetMaybeFlex bool + for i, acc := range localAcc[1:] { + switch localType := UnderlyingType(local.Type).(type) { + case composite: + // For composite types acc is used to find the field in the local type, + // and then we try to find a field in target with the same name. + localMembers := localType.members() + if acc >= len(localMembers) { + return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) + } + + localMember := localMembers[acc] + if localMember.Name == "" { + localMemberType, ok := As[composite](localMember.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) + } + + // This is an anonymous struct or union, ignore it. + local = coreField{ + Type: localMemberType, + offset: local.offset + localMember.Offset.Bytes(), + } + localMaybeFlex = false + continue + } + + targetType, ok := As[composite](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) + } + + targetMember, last, err := coreFindMember(targetType, localMember.Name) + if err != nil { + return coreField{}, coreField{}, err + } + + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, + } + localMaybeFlex = acc == len(localMembers)-1 + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } + targetMaybeFlex = last + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. 
Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + case *Array: + // For arrays, acc is the index in the target. + targetType, ok := As[*Array](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) + } + + if localType.Nelems == 0 && !localMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") + } + if targetType.Nelems == 0 && !targetMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") + } + + if localType.Nelems > 0 && acc >= int(localType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) + } + if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) + } + + local = coreField{ + Type: localType.Type, + offset: local.offset, + } + localMaybeFlex = false + + if err := local.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } + targetMaybeFlex = false + + if err := target.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + default: + return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) + } + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, err + } + } + + return local, target, nil +} + +// coreFindMember finds a member in a composite 
type while handling anonymous +// structs and unions. +func coreFindMember(typ composite, name string) (Member, bool, error) { + if name == "" { + return Member{}, false, errors.New("can't search for anonymous member") + } + + type offsetTarget struct { + composite + offset Bits + } + + targets := []offsetTarget{{typ, 0}} + visited := make(map[composite]bool) + + for i := 0; i < len(targets); i++ { + target := targets[i] + + // Only visit targets once to prevent infinite recursion. + if visited[target] { + continue + } + if len(visited) >= maxResolveDepth { + // This check is different than libbpf, which restricts the entire + // path to BPF_CORE_SPEC_MAX_LEN items. + return Member{}, false, fmt.Errorf("type is nested too deep") + } + visited[target] = true + + members := target.members() + for j, member := range members { + if member.Name == name { + // NB: This is safe because member is a copy. + member.Offset += target.offset + return member, j == len(members)-1, nil + } + + // The names don't match, but this member could be an anonymous struct + // or union. + if member.Name != "" { + continue + } + + comp, ok := As[composite](member.Type) + if !ok { + return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) + } + + targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) + } + } + + return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) +} + +// coreFindEnumValue follows localAcc to find the equivalent enum value in target. 
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { + localValue, err := localAcc.enumValue(local) + if err != nil { + return nil, nil, err + } + + targetEnum, ok := As[*Enum](target) + if !ok { + return nil, nil, errImpossibleRelocation + } + + localName := newEssentialName(localValue.Name) + for i, targetValue := range targetEnum.Values { + if newEssentialName(targetValue.Name) != localName { + continue + } + + return localValue, &targetEnum.Values[i], nil + } + + return nil, nil, errImpossibleRelocation +} + +// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules. +// +// Only layout compatibility is checked, ignoring names of the root type. +func CheckTypeCompatibility(localType Type, targetType Type) error { + return coreAreTypesCompatible(localType, targetType, nil) +} + +type pair struct { + A, B Type +} + +/* The comment below is from bpf_core_types_are_compat in libbpf.c: + * + * Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. 
Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errIncompatibleTypes if types are not compatible. + */ +func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := localType.(type) { + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: + return nil + + case *Pointer: + tv := targetType.(*Pointer) + return coreAreTypesCompatible(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil { + return err + } + + return coreAreTypesCompatible(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil { + return err + } + + if len(lv.Params) 
!= len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, localParam := range lv.Params { + targetParam := tv.Params[i] + if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil { + return err + } + } + + return nil + + default: + return fmt.Errorf("unsupported type %T", localType) + } +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float, *Int: + return nil + + case *Enum: + tv := targetType.(*Enum) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + case *Fwd: + tv := targetType.(*Fwd) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +// coreEssentialNamesMatch compares two names while ignoring their flavour suffix. +// +// This should only be used on names which are in the global scope, like struct +// names, typedefs or enum values. +func coreEssentialNamesMatch(a, b string) bool { + if a == "" || b == "" { + // allow anonymous and named type to match + return true + } + + return newEssentialName(a) == newEssentialName(b) +} + +/* The comment below is from __bpf_core_types_match in relo_core.c: + * + * Check that two types "match". This function assumes that root types were + * already checked for name match. + * + * The matching relation is defined as follows: + * - modifiers and typedefs are stripped (and, hence, effectively ignored) + * - generally speaking types need to be of same kind (struct vs. struct, union + * vs. union, etc.) + * - exceptions are struct/union behind a pointer which could also match a + * forward declaration of a struct or union, respectively, and enum vs. 
+ * enum64 (see below) + * Then, depending on type: + * - integers: + * - match if size and signedness match + * - arrays & pointers: + * - target types are recursively matched + * - structs & unions: + * - local members need to exist in target with the same name + * - for each member we recursively check match unless it is already behind a + * pointer, in which case we only check matching names and compatible kind + * - enums: + * - local variants have to have a match in target by symbolic name (but not + * numeric value) + * - size has to match (but enum may match enum64 and vice versa) + * - function pointers: + * - number and position of arguments in local type has to match target + * - for each argument and the return value we recursively check match + */ +func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) { + return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes) + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := (localType).(type) { + case *Void: + + case *Fwd: + if targetType.(*Fwd).Kind != lv.Kind { + return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Enum: + return coreEnumsMatch(lv, targetType.(*Enum)) + + case composite: + tv := targetType.(composite) + + if len(lv.members()) > len(tv.members()) { + return errIncompatibleTypes + } + + localMembers := lv.members() + targetMembers := map[string]Member{} + 
for _, member := range tv.members() { + targetMembers[member.Name] = member + } + + for _, localMember := range localMembers { + targetMember, found := targetMembers[localMember.Name] + if !found { + return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes) + } + + err := coreTypesMatch(localMember.Type, targetMember.Type, visited) + if err != nil { + return err + } + } + + case *Int: + if !coreEncodingMatches(lv, targetType.(*Int)) { + return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Pointer: + tv := targetType.(*Pointer) + + // Allow a pointer to a forward declaration to match a struct + // or union. + if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) { + return nil + } + + if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) { + return nil + } + + return coreTypesMatch(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + + if lv.Nelems != tv.Nelems { + return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + return coreTypesMatch(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, lparam := range lv.Params { + if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil { + return err + } + } + + return coreTypesMatch(lv.Return, tv.Return, visited) + + default: + return fmt.Errorf("unsupported type %T", localType) + } + + return nil +} + +// coreEncodingMatches returns true if both ints have the same size and signedness. +// All encodings other than `Signed` are considered unsigned. 
+func coreEncodingMatches(local, target *Int) bool {
+	return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed)
+}
+
+// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true:
+// - size has to match (but enum may match enum64 and vice versa)
+// - local variants have to have a match in target by symbolic name (but not numeric value)
+func coreEnumsMatch(local *Enum, target *Enum) error {
+	if local.Size != target.Size {
+		return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes)
+	}
+
+	// If there are more values in the local than the target, there must be at least one value in the local
+	// that isn't in the target, and therefore the types are incompatible.
+	if len(local.Values) > len(target.Values) {
+		return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes)
+	}
+
+outer:
+	for _, lv := range local.Values {
+		for _, rv := range target.Values {
+			if coreEssentialNamesMatch(lv.Name, rv.Name) {
+				continue outer
+			}
+		}
+
+		return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
new file mode 100644
index 0000000000..b1f4b1fc3e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -0,0 +1,5 @@
+// Package btf handles data encoded according to the BPF Type Format.
+// +// The canonical documentation lives in the Linux kernel repository and is +// available at https://www.kernel.org/doc/html/latest/bpf/btf.html +package btf diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 0000000000..eb9044badf --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,835 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. + funcInfos map[string]FuncInfos + lineInfos map[string]LineInfos + relocationInfos map[string]CORERelocationInfos +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. +func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, spec) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. 
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string]FuncInfos, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncInfos(bfis, spec) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string]LineInfos, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) + if err != nil { + return 
nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type funcInfoMeta struct{} +type coreRelocationMeta struct{} + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) +} + +// Assign per-instruction metadata to the instructions in insns. +func AssignMetadataToInstructions( + insns asm.Instructions, + funcInfos FuncInfos, + lineInfos LineInfos, + reloInfos CORERelocationInfos, +) { + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos.infos) > 0 && funcInfos.infos[0].offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos.infos[0].fn) + funcInfos.infos = funcInfos.infos[1:] + } + + if len(lineInfos.infos) > 0 && lineInfos.infos[0].offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos.infos[0].line) + lineInfos.infos = lineInfos.infos[1:] + } + + if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) + reloInfos.infos = reloInfos.infos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +// +// If an instruction has an [asm.Comment], it will be synthesized into a mostly +// empty line info. 
+func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil { + goto marshal + } + } + + return nil, nil, nil + +marshal: + var fiBuf, liBuf bytes.Buffer + for { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &funcInfo{ + fn: fn, + offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, b); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if source := iter.Ins.Source(); source != nil { + var line *Line + if l, ok := source.(*Line); ok { + line = l + } else { + line = &Line{ + line: source.String(), + } + } + + li := &lineInfo{ + line: line, + offset: iter.Offset, + } + if err := li.marshal(&liBuf, b); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + + if !iter.Next() { + break + } + } + + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. 
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
+	var header btfExtHeader
+	if err := binary.Read(r, bo, &header); err != nil {
+		return nil, fmt.Errorf("can't read header: %v", err)
+	}
+
+	if header.Magic != btfMagic {
+		return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+	}
+
+	if header.Version != 1 {
+		return nil, fmt.Errorf("unexpected version %v", header.Version)
+	}
+
+	if header.Flags != 0 {
+		return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+	}
+
+	if int64(header.HdrLen) < int64(binary.Size(&header)) {
+		return nil, fmt.Errorf("header length shorter than btfExtHeader size")
+	}
+
+	return &header, nil
+}
+
+// funcInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its func_info entries.
+func (h *btfExtHeader) funcInfoStart() int64 {
+	return int64(h.HdrLen) + int64(h.FuncInfoOff) // widen before adding: untrusted uint32s must not wrap
+}
+
+// lineInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its line_info entries.
+func (h *btfExtHeader) lineInfoStart() int64 {
+	return int64(h.HdrLen) + int64(h.LineInfoOff) // widen before adding: untrusted uint32s must not wrap
+}
+
+// coreReloStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its CO-RE relocation entries.
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
+	return int64(h.HdrLen) + int64(ch.COREReloOff) // widen before adding: untrusted uint32s must not wrap
+}
+
+// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
+// field is larger than its size.
+type btfExtCOREHeader struct {
+	COREReloOff uint32
+	COREReloLen uint32
+}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+	extHdrSize := int64(binary.Size(extHeader)) // not &extHeader: Size of a **struct returns -1
+	remainder := int64(extHeader.HdrLen) - extHdrSize
+
+	if remainder == 0 {
+		return nil, nil
+	}
+
+	var coreHeader btfExtCOREHeader
+	if err := binary.Read(r, bo, &coreHeader); err != nil {
+		return nil, fmt.Errorf("can't read header: %v", err)
+	}
+
+	return &coreHeader, nil
+}
+
+type btfExtInfoSec struct {
+	SecNameOff uint32
+	NumInfo    uint32
+}
+
+// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
+// appearing within func_info and line_info sub-sections.
+// These headers appear once for each program section in the ELF and are
+// followed by one or more func/line_info records for the section.
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
+	var infoHeader btfExtInfoSec
+	if err := binary.Read(r, bo, &infoHeader); err != nil {
+		return "", nil, fmt.Errorf("read ext info header: %w", err)
+	}
+
+	secName, err := strings.Lookup(infoHeader.SecNameOff)
+	if err != nil {
+		return "", nil, fmt.Errorf("get section name: %w", err)
+	}
+	if secName == "" {
+		return "", nil, fmt.Errorf("extinfo header refers to empty section name")
+	}
+
+	if infoHeader.NumInfo == 0 {
+		return "", nil, fmt.Errorf("section %s has zero records", secName)
+	}
+
+	return secName, &infoHeader, nil
+}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+	const maxRecordSize = 256
+
+	var recordSize uint32
+	if err := binary.Read(r, bo, &recordSize); err != nil {
+		return 0, fmt.Errorf("can't read record size: %v", err)
+	}
+
+	if recordSize < 4 {
+		// Need at least InsnOff worth of bytes per record.
+		return 0, errors.New("record size too short")
+	}
+	if recordSize > maxRecordSize {
+		return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+	}
+
+	return recordSize, nil
+}
+
+// FuncInfos contains a sorted list of func infos.
+type FuncInfos struct {
+	infos []funcInfo
+}
+
+// The size of a FuncInfo in BTF wire format.
+var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
+
+type funcInfo struct {
+	fn     *Func
+	offset asm.RawInstructionOffset
+}
+
+type bpfFuncInfo struct {
+	// Instruction offset of the function within an ELF section.
+	InsnOff uint32
+	TypeID  TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+	typ, err := spec.TypeByID(fi.TypeID)
+	if err != nil {
+		return nil, err
+	}
+
+	fn, ok := typ.(*Func)
+	if !ok {
+		return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+	}
+
+	// C doesn't have anonymous functions, but check just in case.
+	if fn.Name == "" {
+		return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+	}
+
+	return &funcInfo{
+		fn,
+		asm.RawInstructionOffset(fi.InsnOff),
+	}, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) (FuncInfos, error) {
+	fis := FuncInfos{
+		infos: make([]funcInfo, 0, len(bfis)),
+	}
+	for _, bfi := range bfis {
+		fi, err := newFuncInfo(bfi, spec)
+		if err != nil {
+			return FuncInfos{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+		}
+		fis.infos = append(fis.infos, *fi)
+	}
+	sort.Slice(fis.infos, func(i, j int) bool {
+		return fis.infos[i].offset < fis.infos[j].offset // strict <: sort.Slice requires a strict weak ordering
+	})
+	return fis, nil
+}
+
+// LoadFuncInfos parses BTF func info in kernel wire format.
+func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncInfos, error) { + fis, err := parseFuncInfoRecords( + reader, + bo, + FuncInfoSize, + recordNum, + false, + ) + if err != nil { + return FuncInfos{}, fmt.Errorf("parsing BTF func info: %w", err) + } + + return newFuncInfos(fis, spec) +} + +// marshal into the BTF wire format. +func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error { + id, err := b.Add(fi.fn) + if err != nil { + return err + } + bfi := bpfFuncInfo{ + InsnOff: uint32(fi.offset), + TypeID: id, + } + buf := make([]byte, FuncInfoSize) + internal.NativeEndian.PutUint32(buf, bfi.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID)) + _, err = w.Write(buf) + return err +} + +// parseFuncInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfFuncInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. +func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) { + var out []bpfFuncInfo + var fi bpfFuncInfo + + if exp, got := FuncInfoSize, recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. 
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if offsetInBytes { + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + } + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +// LineInfos contains a sorted list of line infos. +type LineInfos struct { + infos []lineInfo +} + +type lineInfo struct { + line *Line + offset asm.RawInstructionOffset +} + +// Constants for the format of bpfLineInfo.LineCol. +const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +// LoadLineInfos parses BTF line info in kernel wire format. 
+func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineInfos, error) {
+	lis, err := parseLineInfoRecords(
+		reader,
+		bo,
+		LineInfoSize,
+		recordNum,
+		false,
+	)
+	if err != nil {
+		return LineInfos{}, fmt.Errorf("parsing BTF line info: %w", err)
+	}
+
+	return newLineInfos(lis, spec.strings)
+}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (lineInfo, error) {
+	line, err := strings.Lookup(li.LineOff)
+	if err != nil {
+		return lineInfo{}, fmt.Errorf("lookup of line: %w", err)
+	}
+
+	fileName, err := strings.Lookup(li.FileNameOff)
+	if err != nil {
+		return lineInfo{}, fmt.Errorf("lookup of filename: %w", err)
+	}
+
+	lineNumber := li.LineCol >> bpfLineShift
+	lineColumn := li.LineCol & bpfColumnMax
+
+	return lineInfo{
+		&Line{
+			fileName,
+			line,
+			lineNumber,
+			lineColumn,
+		},
+		asm.RawInstructionOffset(li.InsnOff),
+	}, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) {
+	lis := LineInfos{
+		infos: make([]lineInfo, 0, len(blis)),
+	}
+	for _, bli := range blis {
+		li, err := newLineInfo(bli, strings)
+		if err != nil {
+			return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+		}
+		lis.infos = append(lis.infos, li)
+	}
+	sort.Slice(lis.infos, func(i, j int) bool {
+		return lis.infos[i].offset < lis.infos[j].offset // strict <: sort.Slice requires a strict weak ordering
+	})
+	return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error { + line := li.line + if line.lineNumber > bpfLineMax { + return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) + } + + if line.lineColumn > bpfColumnMax { + return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) + } + + fileNameOff, err := b.addString(line.fileName) + if err != nil { + return fmt.Errorf("file name %q: %w", line.fileName, err) + } + + lineOff, err := b.addString(line.line) + if err != nil { + return fmt.Errorf("line %q: %w", line.line, err) + } + + bli := bpfLineInfo{ + uint32(li.offset), + fileNameOff, + lineOff, + (line.lineNumber << bpfLineShift) | line.lineColumn, + } + + buf := make([]byte, LineInfoSize) + internal.NativeEndian.PutUint32(buf, bli.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff) + internal.NativeEndian.PutUint32(buf[8:], bli.LineOff) + internal.NativeEndian.PutUint32(buf[12:], bli.LineCol) + _, err = w.Write(buf) + return err +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. +func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfLineInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. 
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { + var li bpfLineInfo + + if exp, got := uint32(binary.Size(li)), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } + + out := make([]bpfLineInfo, 0, recordNum) + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &li); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } + + if offsetInBytes { + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + li.InsnOff /= asm.InstructionSize + } + + out = append(out, li) + } + + return out, nil +} + +// bpfCORERelo matches the kernel's struct bpf_core_relo. +type bpfCORERelo struct { + InsnOff uint32 + TypeID TypeID + AccessStrOff uint32 + Kind coreKind +} + +type CORERelocation struct { + // The local type of the relocation, stripped of typedefs and qualifiers. + typ Type + accessor coreAccessor + kind coreKind + // The ID of the local type in the source BTF. + id TypeID +} + +func (cr *CORERelocation) String() string { + return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id) +} + +func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { + relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) + return relo +} + +// CORERelocationInfos contains a sorted list of co:re relocation infos. 
+type CORERelocationInfos struct { + infos []coreRelocationInfo +} + +type coreRelocationInfo struct { + relo *CORERelocation + offset asm.RawInstructionOffset +} + +func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) { + typ, err := spec.TypeByID(relo.TypeID) + if err != nil { + return nil, err + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCOREAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + return &coreRelocationInfo{ + &CORERelocation{ + typ, + accessor, + relo.Kind, + relo.TypeID, + }, + asm.RawInstructionOffset(relo.InsnOff), + }, nil +} + +func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) { + rs := CORERelocationInfos{ + infos: make([]coreRelocationInfo, 0, len(brs)), + } + for _, br := range brs { + relo, err := newRelocationInfo(br, spec, strings) + if err != nil { + return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err) + } + rs.infos = append(rs.infos, *relo) + } + sort.Slice(rs.infos, func(i, j int) bool { + return rs.infos[i].offset < rs.infos[j].offset + }) + return rs, nil +} + +var extInfoReloSize = binary.Size(bpfCORERelo{}) + +// parseCORERelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. 
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + if recordSize != uint32(extInfoReloSize) { + return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) + } + + result := make(map[string][]bpfCORERelo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseCOREReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) { + var out []bpfCORERelo + + var relo bpfCORERelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } + + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. 
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go new file mode 100644 index 0000000000..6feb08dfbb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/feature.go @@ -0,0 +1,123 @@ +package btf + +import ( + "errors" + "math" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// haveBTF attempts to load a BTF blob containing an Int. It should pass on any +// kernel that supports BPF_BTF_LOAD. +var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error { + // 0-length anonymous integer + err := probeBTF(&Int{}) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}) + +// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is +// used as a proxy for .bss, .data and .rodata map support, which generally +// come with a Var and Datasec. These were introduced in Linux 5.2. +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error { + if err := haveBTF(); err != nil { + return err + } + + v := &Var{ + Name: "a", + Type: &Pointer{(*Void)(nil)}, + } + + err := probeBTF(v) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: creating the map may still + // succeed without Btf* attrs. + return internal.ErrNotSupported + } + return err +}) + +// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It +// is used as a proxy for ext_info (func_info) support, which depends on +// Func(Proto) by definition. 
+var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error { + if err := haveBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}) + +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error { + if err := haveProgBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + Linkage: GlobalFunc, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}) + +var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error { + if err := haveBTF(); err != nil { + return err + } + + enum := &Enum{ + Size: 8, + Values: []EnumValue{ + {"TEST", math.MaxUint32 + 1}, + }, + } + + err := probeBTF(enum) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}) + +func probeBTF(typ Type) error { + b, err := NewBuilder([]Type{typ}) + if err != nil { + return err + } + + buf, err := b.Marshal(nil, nil) + if err != nil { + return err + } + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(buf), + BtfSize: uint32(len(buf)), + }) + + if err == nil { + fd.Close() + } + + return err +} diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 0000000000..5e581b4a85 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,350 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. 
+type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. 
+// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + typ = skipQualifiers(typ) + fmt.Fprintf(&gf.w, "type %s ", name) + if err := gf.writeTypeLit(typ, 0); err != nil { + return err + } + + e, ok := typ.(*Enum) + if !ok || len(e.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range e.Values { + id := gf.enumIdentifier(name, ev.Name) + var value any + if e.Signed { + value = int64(ev.Value) + } else { + value = ev.Value + } + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value) + } + gf.w.WriteString(")") + + return nil +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. 
+// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + err = gf.writeIntLit(v) + + case *Enum: + if !v.Signed { + gf.w.WriteRune('u') + } + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + err = fmt.Errorf("invalid enum size %d", v.Size) + } + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) error { + bits := i.Size * 8 + switch i.Encoding { + case Bool: + if i.Size != 1 { + return fmt.Errorf("bool with size %d", i.Size) + } + gf.w.WriteString("bool") + case Char: + if i.Size != 1 { + return fmt.Errorf("char with size %d", i.Size) + } + // BTF doesn't have a way to specify the signedness of a char. Assume + // we are dealing with unsigned, since this works nicely with []byte + // in Go code. 
+ fallthrough + case Unsigned, Signed: + stem := "uint" + if i.Encoding == Signed { + stem = "int" + } + if i.Size > 8 { + fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8) + } else { + fmt.Fprintf(&gf.w, "%s%d", stem, bits) + } + default: + return fmt.Errorf("can't encode %s", i.Encoding) + } + return nil +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + fieldSize, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + + prevOffset = offset + uint32(fieldSize) + if prevOffset > size { + return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size) + } + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. 
+ union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("can't format %s as part of data section", vsi.Type) + } + + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+ continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 0000000000..adfa6fed4b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,317 @@ +package btf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 + + needsKernelBase bool +} + +// NewHandle loads the contents of a [Builder] into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandle(b *Builder) (*Handle, error) { + small := getByteSlice() + defer putByteSlice(small) + + buf, err := b.Marshal(*small, KernelMarshalOptions()) + if err != nil { + return nil, fmt.Errorf("marshal BTF: %w", err) + } + + return NewHandleFromRawBTF(buf) +} + +// NewHandleFromRawBTF loads raw BTF into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. 
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
+	const minLogSize = 64 * 1024
+
+	if uint64(len(btf)) > math.MaxUint32 {
+		return nil, errors.New("BTF exceeds the maximum size")
+	}
+
+	attr := &sys.BtfLoadAttr{
+		Btf:     sys.NewSlicePointer(btf),
+		BtfSize: uint32(len(btf)),
+	}
+
+	var (
+		logBuf []byte
+		err    error
+	)
+	// Retry loop: the first BTF_LOAD attempt is made without a log buffer.
+	// On failure we allocate (or grow) a verifier log buffer and try again,
+	// so that the final error can carry the kernel's verifier log.
+	for {
+		var fd *sys.FD
+		fd, err = sys.BtfLoad(attr)
+		if err == nil {
+			return &Handle{fd, attr.BtfSize, false}, nil
+		}
+
+		if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize {
+			// The log buffer already has the correct size.
+			break
+		}
+
+		if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) {
+			// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
+			// if there are other verification errors. ENOSPC is only returned when
+			// the BTF blob is correct, a log was requested, and the provided buffer
+			// is too small. We're therefore not sure whether we got the full
+			// log or not.
+			break
+		}
+
+		// Make an educated guess how large the buffer should be. Start
+		// at a reasonable minimum and then double the size.
+		logSize := uint32(max(len(logBuf)*2, minLogSize))
+		if int(logSize) < len(logBuf) {
+			return nil, errors.New("overflow while probing log buffer size")
+		}
+
+		if attr.BtfLogTrueSize != 0 {
+			// The kernel has given us a hint how large the log buffer has to be.
+			logSize = attr.BtfLogTrueSize
+		}
+
+		logBuf = make([]byte, logSize)
+		attr.BtfLogSize = logSize
+		attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+		attr.BtfLogLevel = 1
+	}
+
+	// Loading failed for good: prefer reporting a missing-BTF-support error
+	// over the raw syscall error if the feature probe fails.
+	if err := haveBTF(); err != nil {
+		return nil, err
+	}
+
+	return nil, internal.ErrorWithLog("load btf", err, logBuf)
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist, if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) { + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size, info.IsModule()}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base must contain type information for vmlinux if the handle is for +// a kernel module. It may be nil otherwise. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + if h.needsKernelBase && base == nil { + return nil, fmt.Errorf("missing base types") + } + + return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base) +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +// HandleInfo describes a Handle. +type HandleInfo struct { + // ID of this handle in the kernel. The ID is only valid as long as the + // associated handle is kept alive. + ID ID + + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + + // IsKernel is true if the BTF originated with the kernel and not + // userspace. + IsKernel bool + + // Size of the raw BTF in bytes. 
+	size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+	// We invoke the syscall once with an empty BTF and name buffers to get size
+	// information to allocate buffers. Then we invoke it a second time with
+	// buffers to receive the data.
+	var btfInfo sys.BtfInfo
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+	}
+
+	if btfInfo.NameLen > 0 {
+		// NameLen doesn't account for the terminating NUL.
+		btfInfo.NameLen++
+	}
+
+	// Don't pull raw BTF by default, since it may be quite large.
+	btfSize := btfInfo.BtfSize
+	btfInfo.BtfSize = 0
+
+	nameBuffer := make([]byte, btfInfo.NameLen)
+	btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, err
+	}
+
+	return &HandleInfo{
+		ID:       ID(btfInfo.Id),
+		Name:     unix.ByteSliceToString(nameBuffer),
+		IsKernel: btfInfo.KernelBtf != 0,
+		size:     btfSize,
+	}, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+	return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+	return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+	// The ID of the current handle. Only valid after a call to Next.
+	ID ID
+	// The current Handle. Only valid until a call to Next.
+	// See Take if you want to retain the handle.
+	Handle *Handle
+	err error
+}
+
+// Next retrieves a handle for the next BTF object.
+//
+// Returns true if another BTF object was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next() bool {
+	id := it.ID
+	for {
+		attr := &sys.BtfGetNextIdAttr{Id: id}
+		err := sys.BtfGetNextId(attr)
+		if errors.Is(err, os.ErrNotExist) {
+			// There are no more BTF objects.
+			break
+		} else if err != nil {
+			it.err = fmt.Errorf("get next BTF ID: %w", err)
+			break
+		}
+
+		id = attr.NextId
+		handle, err := NewHandleFromID(id)
+		if errors.Is(err, os.ErrNotExist) {
+			// Try again with the next ID.
+			continue
+		} else if err != nil {
+			it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+			break
+		}
+
+		// Release the previous iteration's handle before storing the new
+		// one. Close is a no-op on a nil *Handle, so this is safe on the
+		// first iteration and after Take.
+		it.Handle.Close()
+		it.ID, it.Handle = id, handle
+		return true
+	}
+
+	// No more handles or we encountered an error.
+	it.Handle.Close()
+	it.Handle = nil
+	return false
+}
+
+// Take the ownership of the current handle.
+//
+// It's the caller's responsibility to close the handle.
+func (it *HandleIterator) Take() *Handle {
+	handle := it.Handle
+	it.Handle = nil
+	return handle
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+	return it.err
+}
+
+// FindHandle returns the first handle for which predicate returns true.
+//
+// Requires CAP_SYS_ADMIN.
+//
+// Returns an error wrapping ErrNotFound if predicate never returns true or if
+// there is no BTF loaded into the kernel.
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) { + it := new(HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, fmt.Errorf("info for ID %d: %w", it.ID, err) + } + + if predicate(info) { + return it.Take(), nil + } + } + if err := it.Err(); err != nil { + return nil, fmt.Errorf("iterate handles: %w", err) + } + + return nil, fmt.Errorf("find handle: %w", ErrNotFound) +} diff --git a/vendor/github.com/cilium/ebpf/btf/kernel.go b/vendor/github.com/cilium/ebpf/btf/kernel.go new file mode 100644 index 0000000000..8584ebcb93 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/kernel.go @@ -0,0 +1,159 @@ +package btf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" +) + +var kernelBTF = struct { + sync.RWMutex + kernel *Spec + modules map[string]*Spec +}{ + modules: make(map[string]*Spec), +} + +// FlushKernelSpec removes any cached kernel type information. +func FlushKernelSpec() { + kallsyms.FlushKernelModuleCache() + + kernelBTF.Lock() + defer kernelBTF.Unlock() + + kernelBTF.kernel = nil + kernelBTF.modules = make(map[string]*Spec) +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +func LoadKernelSpec() (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.kernel + kernelBTF.RUnlock() + + if spec == nil { + kernelBTF.Lock() + defer kernelBTF.Unlock() + + spec = kernelBTF.kernel + } + + if spec != nil { + return spec.Copy(), nil + } + + spec, _, err := loadKernelSpec() + if err != nil { + return nil, err + } + + kernelBTF.kernel = spec + return spec.Copy(), nil +} + +// LoadKernelModuleSpec returns the BTF information for the named kernel module. 
+// +// Defaults to /sys/kernel/btf/. +// Returns an error wrapping ErrNotSupported if BTF is not enabled. +// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. +func LoadKernelModuleSpec(module string) (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.modules[module] + kernelBTF.RUnlock() + + if spec != nil { + return spec.Copy(), nil + } + + base, err := LoadKernelSpec() + if err != nil { + return nil, fmt.Errorf("load kernel spec: %w", err) + } + + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if spec = kernelBTF.modules[module]; spec != nil { + return spec.Copy(), nil + } + + spec, err = loadKernelModuleSpec(module, base) + if err != nil { + return nil, err + } + + kernelBTF.modules[module] = spec + return spec.Copy(), nil +} + +func loadKernelSpec() (_ *Spec, fallback bool, _ error) { + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + spec, err := loadRawSpec(fh, internal.NativeEndian, nil) + return spec, false, err + } + + file, err := findVMLinux() + if err != nil { + return nil, false, err + } + defer file.Close() + + spec, err := LoadSpecFromReader(file) + return spec, true, err +} + +func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + dir, file := filepath.Split(module) + if dir != "" || filepath.Ext(file) != "" { + return nil, fmt.Errorf("invalid module name %q", module) + } + + fh, err := os.Open(filepath.Join("/sys/kernel/btf", module)) + if err != nil { + return nil, err + } + defer fh.Close() + + return loadRawSpec(fh, internal.NativeEndian, base) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. 
+func findVMLinux() (*os.File, error) { + release, err := internal.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := os.Open(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go new file mode 100644 index 0000000000..f14cfa6e97 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -0,0 +1,611 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "maps" + "math" + "slices" + "sync" + + "github.com/cilium/ebpf/internal" +) + +type MarshalOptions struct { + // Target byte order. Defaults to the system's native endianness. + Order binary.ByteOrder + // Remove function linkage information for compatibility with <5.6 kernels. + StripFuncLinkage bool + // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. + ReplaceEnum64 bool + // Prevent the "No type found" error when loading BTF without any types. + PreventNoTypeFound bool +} + +// KernelMarshalOptions will generate BTF suitable for the current kernel. +func KernelMarshalOptions() *MarshalOptions { + return &MarshalOptions{ + Order: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceEnum64: haveEnum64() != nil, + PreventNoTypeFound: true, // All current kernels require this. 
+ } +} + +// encoder turns Types into raw BTF. +type encoder struct { + MarshalOptions + + pending internal.Deque[Type] + buf *bytes.Buffer + strings *stringTableBuilder + ids map[Type]TypeID + visited map[Type]struct{} + lastID TypeID +} + +var bufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, btfHeaderLen+128) + return &buf + }, +} + +func getByteSlice() *[]byte { + return bufferPool.Get().(*[]byte) +} + +func putByteSlice(buf *[]byte) { + *buf = (*buf)[:0] + bufferPool.Put(buf) +} + +// Builder turns Types into raw BTF. +// +// The default value may be used and represents an empty BTF blob. Void is +// added implicitly if necessary. +type Builder struct { + // Explicitly added types. + types []Type + // IDs for all added types which the user knows about. + stableIDs map[Type]TypeID + // Explicitly added strings. + strings *stringTableBuilder +} + +// NewBuilder creates a Builder from a list of types. +// +// It is more efficient than calling [Add] individually. +// +// Returns an error if adding any of the types fails. +func NewBuilder(types []Type) (*Builder, error) { + b := &Builder{ + make([]Type, 0, len(types)), + make(map[Type]TypeID, len(types)), + nil, + } + + for _, typ := range types { + _, err := b.Add(typ) + if err != nil { + return nil, fmt.Errorf("add %s: %w", typ, err) + } + } + + return b, nil +} + +// Empty returns true if neither types nor strings have been added. +func (b *Builder) Empty() bool { + return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0) +} + +// Add a Type and allocate a stable ID for it. +// +// Adding the identical Type multiple times is valid and will return the same ID. +// +// See [Type] for details on identity. +func (b *Builder) Add(typ Type) (TypeID, error) { + if b.stableIDs == nil { + b.stableIDs = make(map[Type]TypeID) + } + + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. 
+ return 0, nil + } + + if ds, ok := typ.(*Datasec); ok { + if err := datasecResolveWorkaround(b, ds); err != nil { + return 0, err + } + } + + id, ok := b.stableIDs[typ] + if ok { + return id, nil + } + + b.types = append(b.types, typ) + + id = TypeID(len(b.types)) + if int(id) != len(b.types) { + return 0, fmt.Errorf("no more type IDs") + } + + b.stableIDs[typ] = id + return id, nil +} + +// Marshal encodes all types in the Marshaler into BTF wire format. +// +// opts may be nil. +func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { + stb := b.strings + if stb == nil { + // Assume that most types are named. This makes encoding large BTF like + // vmlinux a lot cheaper. + stb = newStringTableBuilder(len(b.types)) + } else { + // Avoid modifying the Builder's string table. + stb = b.strings.Copy() + } + + if opts == nil { + opts = &MarshalOptions{Order: internal.NativeEndian} + } + + // Reserve space for the BTF header. + buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] + + w := internal.NewBuffer(buf) + defer internal.PutBuffer(w) + + e := encoder{ + MarshalOptions: *opts, + buf: w, + strings: stb, + lastID: TypeID(len(b.types)), + visited: make(map[Type]struct{}, len(b.types)), + ids: maps.Clone(b.stableIDs), + } + + if e.ids == nil { + e.ids = make(map[Type]TypeID) + } + + types := b.types + if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound { + // We have strings that need to be written out, + // but no types (besides the implicit Void). + // Kernels as recent as v6.7 refuse to load such BTF + // with a "No type found" error in the log. + // Fix this by adding a dummy type. + types = []Type{&Int{Size: 0}} + } + + // Ensure that types are marshaled in the exact order they were Add()ed. + // Otherwise the ID returned from Add() won't match. 
+ e.pending.Grow(len(types)) + for _, typ := range types { + e.pending.Push(typ) + } + + if err := e.deflatePending(); err != nil { + return nil, err + } + + length := e.buf.Len() + typeLen := uint32(length - btfHeaderLen) + + stringLen := e.strings.Length() + buf = e.strings.AppendEncoded(e.buf.Bytes()) + + // Fill out the header, and write it out. + header := &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(btfHeaderLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringLen), + } + + err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header) + if err != nil { + return nil, fmt.Errorf("write header: %v", err) + } + + return buf, nil +} + +// addString adds a string to the resulting BTF. +// +// Adding the same string multiple times will return the same result. +// +// Returns an identifier into the string table or an error if the string +// contains invalid characters. +func (b *Builder) addString(str string) (uint32, error) { + if b.strings == nil { + b.strings = newStringTableBuilder(0) + } + + return b.strings.Add(str) +} + +func (e *encoder) allocateIDs(root Type) (err error) { + visitInPostorder(root, e.visited, func(typ Type) bool { + if _, ok := typ.(*Void); ok { + return true + } + + if _, ok := e.ids[typ]; ok { + return true + } + + id := e.lastID + 1 + if id < e.lastID { + err = errors.New("type ID overflow") + return false + } + + e.pending.Push(typ) + e.ids[typ] = id + e.lastID = id + return true + }) + return +} + +// id returns the ID for the given type or panics with an error. +func (e *encoder) id(typ Type) TypeID { + if _, ok := typ.(*Void); ok { + return 0 + } + + id, ok := e.ids[typ] + if !ok { + panic(fmt.Errorf("no ID for type %v", typ)) + } + + return id +} + +func (e *encoder) deflatePending() error { + // Declare root outside of the loop to avoid repeated heap allocations. 
+ var root Type + + for !e.pending.Empty() { + root = e.pending.Shift() + + // Allocate IDs for all children of typ, including transitive dependencies. + if err := e.allocateIDs(root); err != nil { + return err + } + + if err := e.deflateType(root); err != nil { + id := e.ids[root] + return fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + } + } + + return nil +} + +func (e *encoder) deflateType(typ Type) (err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + panic(r) + } + } + }() + + var raw rawType + raw.NameOff, err = e.strings.Add(typ.TypeName()) + if err != nil { + return err + } + + switch v := typ.(type) { + case *Void: + return errors.New("Void is implicit in BTF wire format") + + case *Int: + raw.SetKind(kindInt) + raw.SetSize(v.Size) + + var bi btfInt + bi.SetEncoding(v.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(v.Size) * 8) + raw.data = bi + + case *Pointer: + raw.SetKind(kindPointer) + raw.SetType(e.id(v.Target)) + + case *Array: + raw.SetKind(kindArray) + raw.data = &btfArray{ + e.id(v.Type), + e.id(v.Index), + v.Nelems, + } + + case *Struct: + raw.SetKind(kindStruct) + raw.SetSize(v.Size) + raw.data, err = e.convertMembers(&raw.btfType, v.Members) + + case *Union: + err = e.deflateUnion(&raw, v) + + case *Enum: + if v.Size == 8 { + err = e.deflateEnum64(&raw, v) + } else { + err = e.deflateEnum(&raw, v) + } + + case *Fwd: + raw.SetKind(kindForward) + raw.SetFwdKind(v.Kind) + + case *Typedef: + raw.SetKind(kindTypedef) + raw.SetType(e.id(v.Type)) + + case *Volatile: + raw.SetKind(kindVolatile) + raw.SetType(e.id(v.Type)) + + case *Const: + raw.SetKind(kindConst) + raw.SetType(e.id(v.Type)) + + case *Restrict: + raw.SetKind(kindRestrict) + raw.SetType(e.id(v.Type)) + + case *Func: + raw.SetKind(kindFunc) + raw.SetType(e.id(v.Type)) + if !e.StripFuncLinkage { + raw.SetLinkage(v.Linkage) + } 
+ + case *FuncProto: + raw.SetKind(kindFuncProto) + raw.SetType(e.id(v.Return)) + raw.SetVlen(len(v.Params)) + raw.data, err = e.deflateFuncParams(v.Params) + + case *Var: + raw.SetKind(kindVar) + raw.SetType(e.id(v.Type)) + raw.data = btfVariable{uint32(v.Linkage)} + + case *Datasec: + raw.SetKind(kindDatasec) + raw.SetSize(v.Size) + raw.SetVlen(len(v.Vars)) + raw.data = e.deflateVarSecinfos(v.Vars) + + case *Float: + raw.SetKind(kindFloat) + raw.SetSize(v.Size) + + case *declTag: + raw.SetKind(kindDeclTag) + raw.SetType(e.id(v.Type)) + raw.data = &btfDeclTag{uint32(v.Index)} + raw.NameOff, err = e.strings.Add(v.Value) + + case *typeTag: + raw.SetKind(kindTypeTag) + raw.SetType(e.id(v.Type)) + raw.NameOff, err = e.strings.Add(v.Value) + + default: + return fmt.Errorf("don't know how to deflate %T", v) + } + + if err != nil { + return err + } + + return raw.Marshal(e.buf, e.Order) +} + +func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) { + raw.SetKind(kindUnion) + raw.SetSize(union.Size) + raw.data, err = e.convertMembers(&raw.btfType, union.Members) + return +} + +func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) { + bms := make([]btfMember, 0, len(members)) + isBitfield := false + for _, member := range members { + isBitfield = isBitfield || member.BitfieldSize > 0 + + offset := member.Offset + if isBitfield { + offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff) + } + + nameOff, err := e.strings.Add(member.Name) + if err != nil { + return nil, err + } + + bms = append(bms, btfMember{ + nameOff, + e.id(member.Type), + uint32(offset), + }) + } + + header.SetVlen(len(members)) + header.SetBitfield(isBitfield) + return bms, nil +} + +func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) { + raw.SetKind(kindEnum) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + // Signedness appeared together with ENUM64 support. 
+ raw.SetSigned(enum.Signed && !e.ReplaceEnum64) + raw.data, err = e.deflateEnumValues(enum) + return +} + +func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) { + bes := make([]btfEnum, 0, len(enum.Values)) + for _, value := range enum.Values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + if enum.Signed { + if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name) + } + } else { + if value.Value > math.MaxUint32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name) + } + } + + bes = append(bes, btfEnum{ + nameOff, + uint32(value.Value), + }) + } + + return bes, nil +} + +func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { + if e.ReplaceEnum64 { + // Replace the ENUM64 with a union of fields with the correct size. + // This matches libbpf behaviour on purpose. 
+ placeholder := &Int{ + "enum64_placeholder", + enum.Size, + Unsigned, + } + if enum.Signed { + placeholder.Encoding = Signed + } + if err := e.allocateIDs(placeholder); err != nil { + return fmt.Errorf("add enum64 placeholder: %w", err) + } + + members := make([]Member, 0, len(enum.Values)) + for _, v := range enum.Values { + members = append(members, Member{ + Name: v.Name, + Type: placeholder, + }) + } + + return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members}) + } + + raw.SetKind(kindEnum64) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + raw.SetSigned(enum.Signed) + raw.data, err = e.deflateEnum64Values(enum.Values) + return +} + +func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) { + bes := make([]btfEnum64, 0, len(values)) + for _, value := range values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + bes = append(bes, btfEnum64{ + nameOff, + uint32(value.Value), + uint32(value.Value >> 32), + }) + } + + return bes, nil +} + +func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) { + bps := make([]btfParam, 0, len(params)) + for _, param := range params { + nameOff, err := e.strings.Add(param.Name) + if err != nil { + return nil, err + } + + bps = append(bps, btfParam{ + nameOff, + e.id(param.Type), + }) + } + return bps, nil +} + +func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo { + vsis := make([]btfVarSecinfo, 0, len(vars)) + for _, v := range vars { + vsis = append(vsis, btfVarSecinfo{ + e.id(v.Type), + v.Offset, + v.Size, + }) + } + return vsis +} + +// MarshalMapKV creates a BTF object containing a map key and value. +// +// The function is intended for the use of the ebpf package and may be removed +// at any point in time. 
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) { + var b Builder + + if key != nil { + keyID, err = b.Add(key) + if err != nil { + return nil, 0, 0, fmt.Errorf("add key type: %w", err) + } + } + + if value != nil { + valueID, err = b.Add(value) + if err != nil { + return nil, 0, 0, fmt.Errorf("add value type: %w", err) + } + } + + handle, err := NewHandle(&b) + if err != nil { + // Check for 'full' map BTF support, since kernels between 4.18 and 5.2 + // already support BTF blobs for maps without Var or Datasec just fine. + if err := haveMapBTF(); err != nil { + return nil, 0, 0, err + } + } + return handle, keyID, valueID, err +} diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 0000000000..7c31461c30 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,198 @@ +package btf + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "maps" + "slices" + "strings" +) + +type stringTable struct { + base *stringTable + offsets []uint32 + prevIdx int + strings []string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + idx := len(base.offsets) - 1 + firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + } + + // Derived from vmlinux BTF. 
+	const averageStringLength = 16
+
+	n := int(r.Size() / averageStringLength)
+	offsets := make([]uint32, 0, n)
+	strings := make([]string, 0, n)
+
+	offset := firstStringOffset
+	scanner := bufio.NewScanner(r)
+	scanner.Split(splitNull)
+	for scanner.Scan() {
+		str := scanner.Text()
+		offsets = append(offsets, offset)
+		strings = append(strings, str)
+		offset += uint32(len(str)) + 1
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	if len(strings) == 0 {
+		return nil, errors.New("string table is empty")
+	}
+
+	if firstStringOffset == 0 && strings[0] != "" {
+		return nil, errors.New("first item in string table is non-empty")
+	}
+
+	return &stringTable{base, offsets, 0, strings}, nil
+}
+
+// splitNull is a bufio.SplitFunc which tokenizes NUL-terminated strings.
+// The returned token does not include the terminating NUL; trailing data
+// without a NUL terminator is an error.
+func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	i := bytes.IndexByte(data, 0)
+	if i == -1 {
+		if atEOF && len(data) > 0 {
+			return 0, nil, errors.New("string table isn't null terminated")
+		}
+		return 0, nil, nil
+	}
+
+	return i + 1, data[:i], nil
+}
+
+// Lookup returns the string at the given offset. Offsets which fall within
+// the base table (if any) are resolved there; all others are resolved in
+// this table.
+func (st *stringTable) Lookup(offset uint32) (string, error) {
+	if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
+		return st.base.lookup(offset)
+	}
+	return st.lookup(offset)
+}
+
+// lookup resolves an offset in this table only, ignoring the base table.
+// Returns an error if offset is not the start of a string.
+func (st *stringTable) lookup(offset uint32) (string, error) {
+	// Fast path: zero offset is the empty string, looked up frequently.
+	if offset == 0 && st.base == nil {
+		return "", nil
+	}
+
+	// Accesses tend to be globally increasing, so check if the next string is
+	// the one we want. This skips the binary search in about 50% of cases.
+	if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset {
+		st.prevIdx++
+		return st.strings[st.prevIdx], nil
+	}
+
+	i, found := slices.BinarySearch(st.offsets, offset)
+	if !found {
+		return "", fmt.Errorf("offset %d isn't start of a string", offset)
+	}
+
+	// Set the new increment index, but only if it's greater than the current.
+	if i > st.prevIdx+1 {
+		st.prevIdx = i
+	}
+
+	return st.strings[i], nil
+}
+
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+	return len(st.strings)
+}
+
+// stringTableBuilder builds BTF string tables.
+type stringTableBuilder struct {
+	length  uint32
+	strings map[string]uint32
+}
+
+// newStringTableBuilder creates a builder with the given capacity.
+//
+// capacity may be zero.
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+	var stb stringTableBuilder
+
+	if capacity == 0 {
+		// Use the runtime's small default size.
+		stb.strings = make(map[string]uint32)
+	} else {
+		stb.strings = make(map[string]uint32, capacity)
+	}
+
+	// Ensure that the empty string is at index 0.
+	stb.append("")
+	return &stb
+}
+
+// Add a string to the table.
+//
+// Adding the same string multiple times will only store it once.
+func (stb *stringTableBuilder) Add(str string) (uint32, error) {
+	if strings.IndexByte(str, 0) != -1 {
+		return 0, fmt.Errorf("string contains null: %q", str)
+	}
+
+	offset, ok := stb.strings[str]
+	if ok {
+		return offset, nil
+	}
+
+	return stb.append(str), nil
+}
+
+// append unconditionally stores str and returns its offset. Callers must
+// have checked for duplicates and embedded NULs beforehand (see Add).
+func (stb *stringTableBuilder) append(str string) uint32 {
+	offset := stb.length
+	stb.length += uint32(len(str)) + 1
+	stb.strings[str] = offset
+	return offset
+}
+
+// Lookup finds the offset of a string in the table.
+//
+// Returns an error if str hasn't been added yet.
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
+	offset, ok := stb.strings[str]
+	if !ok {
+		return 0, fmt.Errorf("string %q is not in table", str)
+	}
+
+	return offset, nil
+}
+
+// Length returns the length in bytes.
+func (stb *stringTableBuilder) Length() int {
+	return int(stb.length)
+}
+
+// AppendEncoded appends the string table to the end of the provided buffer.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+	n := len(buf)
+	buf = append(buf, make([]byte, stb.Length())...)
+ strings := buf[n:] + for str, offset := range stb.strings { + copy(strings[offset:], str) + } + return buf +} + +// Copy the string table builder. +func (stb *stringTableBuilder) Copy() *stringTableBuilder { + return &stringTableBuilder{ + stb.length, + maps.Clone(stb.strings), + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go new file mode 100644 index 0000000000..c39dc66e46 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/traversal.go @@ -0,0 +1,123 @@ +package btf + +import ( + "fmt" +) + +// Functions to traverse a cyclic graph of types. The below was very useful: +// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order + +// Visit all types reachable from root in postorder. +// +// Traversal stops if yield returns false. +// +// Returns false if traversal was aborted. +func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { + if _, ok := visited[root]; ok { + return true + } + if visited == nil { + visited = make(map[Type]struct{}) + } + visited[root] = struct{}{} + + cont := children(root, func(child *Type) bool { + return visitInPostorder(*child, visited, yield) + }) + if !cont { + return false + } + + return yield(root) +} + +// children calls yield on each child of typ. +// +// Traversal stops if yield returns false. +// +// Returns false if traversal was aborted. +func children(typ Type, yield func(child *Type) bool) bool { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float: + // No children to traverse. 
+ case *Pointer: + if !yield(&v.Target) { + return false + } + case *Array: + if !yield(&v.Index) { + return false + } + if !yield(&v.Type) { + return false + } + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + } + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + } + case *Typedef: + if !yield(&v.Type) { + return false + } + case *Volatile: + if !yield(&v.Type) { + return false + } + case *Const: + if !yield(&v.Type) { + return false + } + case *Restrict: + if !yield(&v.Type) { + return false + } + case *Func: + if !yield(&v.Type) { + return false + } + case *FuncProto: + if !yield(&v.Return) { + return false + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return false + } + } + case *Var: + if !yield(&v.Type) { + return false + } + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return false + } + } + case *declTag: + if !yield(&v.Type) { + return false + } + case *typeTag: + if !yield(&v.Type) { + return false + } + case *cycle: + // cycle has children, but we ignore them deliberately. + default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) + } + + return true +} diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 0000000000..a3397460b9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,1319 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// Mirrors MAX_RESOLVE_DEPTH in libbpf. +// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761 +const maxResolveDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID = sys.TypeID + +// Type represents a type described by BTF. 
+// +// Identity of Type follows the [Go specification]: two Types are considered +// equal if they have the same concrete type and the same dynamic value, aka +// they point at the same location in memory. This means that the following +// Types are considered distinct even though they have the same "shape". +// +// a := &Int{Size: 1} +// b := &Int{Size: 1} +// a != b +// +// [Go specification]: https://go.dev/ref/spec#Comparison_operators +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. + fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // New implementations must update walkType. +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) + _ Type = (*declTag)(nil) + _ Type = (*typeTag)(nil) + _ Type = (*cycle)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } + +type IntEncoding byte + +// Valid IntEncodings. +// +// These may look like they are flags, but they aren't. 
+const ( + Unsigned IntEncoding = 0 + Signed IntEncoding = 1 + Char IntEncoding = 2 + Bool IntEncoding = 4 +) + +func (ie IntEncoding) String() string { + switch ie { + case Char: + // NB: There is no way to determine signedness for char. + return "char" + case Bool: + return "bool" + case Signed: + return "signed" + case Unsigned: + return "unsigned" + default: + return fmt.Sprintf("IntEncoding(%d)", byte(ie)) + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. 
+type Struct struct { + Name string + // The size of the struct including padding, in bytes + Size uint32 + Members []Member +} + +func (s *Struct) Format(fs fmt.State, verb rune) { + formatType(fs, verb, s, "fields=", len(s.Members)) +} + +func (s *Struct) TypeName() string { return s.Name } + +func (s *Struct) size() uint32 { return s.Size } + +func (s *Struct) copy() Type { + cpy := *s + cpy.Members = copyMembers(s.Members) + return &cpy +} + +func (s *Struct) members() []Member { + return s.Members +} + +// Union is a compound type where members occupy the same memory. +type Union struct { + Name string + // The size of the union including padding, in bytes. + Size uint32 + Members []Member +} + +func (u *Union) Format(fs fmt.State, verb rune) { + formatType(fs, verb, u, "fields=", len(u.Members)) +} + +func (u *Union) TypeName() string { return u.Name } + +func (u *Union) size() uint32 { return u.Size } + +func (u *Union) copy() Type { + cpy := *u + cpy.Members = copyMembers(u.Members) + return &cpy +} + +func (u *Union) members() []Member { + return u.Members +} + +func copyMembers(orig []Member) []Member { + cpy := make([]Member, len(orig)) + copy(cpy, orig) + return cpy +} + +type composite interface { + Type + members() []Member +} + +var ( + _ composite = (*Struct)(nil) + _ composite = (*Union)(nil) +) + +// A value in bits. +type Bits uint32 + +// Bytes converts a bit value into bytes. +func (b Bits) Bytes() uint32 { + return uint32(b / 8) +} + +// Member is part of a Struct or Union. +// +// It is not a valid Type. +type Member struct { + Name string + Type Type + Offset Bits + BitfieldSize Bits +} + +// Enum lists possible values. +type Enum struct { + Name string + // Size of the enum value in bytes. + Size uint32 + // True if the values should be interpreted as signed integers. 
+ Signed bool + Values []EnumValue +} + +func (e *Enum) Format(fs fmt.State, verb rune) { + formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values)) +} + +func (e *Enum) TypeName() string { return e.Name } + +// EnumValue is part of an Enum +// +// Is is not a valid Type +type EnumValue struct { + Name string + Value uint64 +} + +func (e *Enum) size() uint32 { return e.Size } +func (e *Enum) copy() Type { + cpy := *e + cpy.Values = make([]EnumValue, len(e.Values)) + copy(cpy.Values, e.Values) + return &cpy +} + +// FwdKind is the type of forward declaration. +type FwdKind int + +// Valid types of forward declaration. +const ( + FwdStruct FwdKind = iota + FwdUnion +) + +func (fk FwdKind) String() string { + switch fk { + case FwdStruct: + return "struct" + case FwdUnion: + return "union" + default: + return fmt.Sprintf("%T(%d)", fk, int(fk)) + } +} + +// Fwd is a forward declaration of a Type. +type Fwd struct { + Name string + Kind FwdKind +} + +func (f *Fwd) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Kind) +} + +func (f *Fwd) TypeName() string { return f.Name } + +func (f *Fwd) copy() Type { + cpy := *f + return &cpy +} + +func (f *Fwd) matches(typ Type) bool { + if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct { + return true + } + + if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion { + return true + } + + return false +} + +// Typedef is an alias of a Type. +type Typedef struct { + Name string + Type Type +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) copy() Type { + cpy := *td + return &cpy +} + +// Volatile is a qualifier. 
+type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction. +func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction { + ins.Metadata.Set(funcInfoMeta{}, fn) + return ins +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) copy() Type { + cpy := *f + return &cpy +} + +// FuncProto is a function declaration. 
+type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) copy() Type { + cpy := *v + return &cpy +} + +// Datasec is a global program section containing data. +type Datasec struct { + Name string + Size uint32 + Vars []VarSecinfo +} + +func (ds *Datasec) Format(fs fmt.State, verb rune) { + formatType(fs, verb, ds) +} + +func (ds *Datasec) TypeName() string { return ds.Name } + +func (ds *Datasec) size() uint32 { return ds.Size } + +func (ds *Datasec) copy() Type { + cpy := *ds + cpy.Vars = make([]VarSecinfo, len(ds.Vars)) + copy(cpy.Vars, ds.Vars) + return &cpy +} + +// VarSecinfo describes variable in a Datasec. +// +// It is not a valid Type. +type VarSecinfo struct { + // Var or Func. + Type Type + Offset uint32 + Size uint32 +} + +// Float is a float of a given length. +type Float struct { + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, "size=", f.Size*8) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +// declTag associates metadata with a declaration. +type declTag struct { + Type Type + Value string + // The index this tag refers to in the target type. 
For composite types, + // a value of -1 indicates that the tag refers to the whole type. Otherwise + // it indicates which member or argument the tag applies to. + Index int +} + +func (dt *declTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index) +} + +func (dt *declTag) TypeName() string { return "" } +func (dt *declTag) copy() Type { + cpy := *dt + return &cpy +} + +// typeTag associates metadata with a type. +type typeTag struct { + Type Type + Value string +} + +func (tt *typeTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) +} + +func (tt *typeTag) TypeName() string { return "" } +func (tt *typeTag) qualify() Type { return tt.Type } +func (tt *typeTag) copy() Type { + cpy := *tt + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. +type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) + _ qualifier = (*typeTag)(nil) +) + +var errUnsizedType = errors.New("type is unsized") + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. 
+func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxResolveDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Returns an error if the Type can't be aligned, like an integer with an uneven +// size. Currently only supports the subset of types necessary for bitfield +// relocations. +func alignof(typ Type) (int, error) { + var n int + + switch t := UnderlyingType(typ).(type) { + case *Enum: + n = int(t.size()) + case *Int: + n = int(t.Size) + case *Array: + return alignof(t.Type) + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } + + if !internal.IsPow(n) { + return 0, fmt.Errorf("alignment value %d is not a power of two", n) + } + + return n, nil +} + +// Copy a Type recursively. +// +// typ may form a cycle. +func Copy(typ Type) Type { + return copyType(typ, nil, make(map[Type]Type), nil) +} + +func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type { + if typ == nil { + return nil + } + + cpy, ok := copies[typ] + if ok { + // This has been copied previously, no need to continue. 
+ return cpy + } + + cpy = typ.copy() + copies[typ] = cpy + + if id, ok := ids[typ]; ok { + copiedIDs[cpy] = id + } + + children(cpy, func(child *Type) bool { + *child = copyType(*child, ids, copies, copiedIDs) + return true + }) + + return cpy +} + +type typeDeque = internal.Deque[*Type] + +// readAndInflateTypes reads the raw btf type info and turns it into a graph +// of Types connected via pointers. +// +// If base is provided, then the types are considered to be of a split BTF +// (e.g., a kernel module). +// +// Returns a slice of types indexed by TypeID. Since BTF ignores compilation +// units, multiple types may share the same name. A Type may form a cyclic graph +// by pointing at itself. +func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) { + // because of the interleaving between types and struct members it is difficult to + // precompute the numbers of raw types this will parse + // this "guess" is a good first estimation + sizeOfbtfType := uintptr(btfTypeLen) + tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 + types := make([]Type, 0, tyMaxCount) + + // Void is defined to always be type ID 0, and is thus omitted from BTF. + types = append(types, (*Void)(nil)) + + firstTypeID := TypeID(0) + if base != nil { + var err error + firstTypeID, err = base.nextTypeID() + if err != nil { + return nil, err + } + + // Split BTF doesn't contain Void. + types = types[:0] + } + + type fixupDef struct { + id TypeID + typ *Type + } + + var fixups []fixupDef + fixup := func(id TypeID, typ *Type) { + if id < firstTypeID { + if baseType, err := base.TypeByID(id); err == nil { + *typ = baseType + return + } + } + + idx := int(id - firstTypeID) + if idx < len(types) { + // We've already inflated this type, fix it up immediately. 
+ *typ = types[idx] + return + } + + fixups = append(fixups, fixupDef{id, typ}) + } + + type bitfieldFixupDef struct { + id TypeID + m *Member + } + + var ( + legacyBitfields = make(map[TypeID][2]Bits) // offset, size + bitfieldFixups []bitfieldFixupDef + ) + convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { + // NB: The fixup below relies on pre-allocating this array to + // work, since otherwise append might re-allocate members. + members := make([]Member, 0, len(raw)) + for i, btfMember := range raw { + name, err := rawStrings.Lookup(btfMember.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(btfMember.Offset), + }) + + m := &members[i] + fixup(raw[i].Type, &m.Type) + + if kindFlag { + m.BitfieldSize = Bits(btfMember.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := legacyBitfields[raw[i].Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + + if m.Type != nil { + // We couldn't find a legacy bitfield, but we know that the member's + // type has already been inflated. Hence we know that it can't be + // a legacy bitfield and there is nothing left to do. + continue + } + + // We don't have fixup data, and the type we're pointing + // at hasn't been inflated yet. No choice but to defer + // the fixup. 
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ + raw[i].Type, + m, + }) + } + return members, nil + } + + var ( + buf = make([]byte, 1024) + header btfType + bInt btfInt + bArr btfArray + bMembers []btfMember + bEnums []btfEnum + bParams []btfParam + bVariable btfVariable + bSecInfos []btfVarSecinfo + bDeclTag btfDeclTag + bEnums64 []btfEnum64 + ) + + var declTags []*declTag + for { + var ( + id = firstTypeID + TypeID(len(types)) + typ Type + ) + + if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) + } + + if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } + + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + name, err := rawStrings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + buf = buf[:btfIntLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err) + } + if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + + case kindPointer: + ptr := &Pointer{nil} + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + buf = buf[:btfArrayLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err) + } + if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + fixup(bArr.IndexType, 
&arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{name, header.Size(), members} + + case kindUnion: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{name, header.Size(), members} + + case kindEnum: + vlen := header.Vlen() + bEnums = slices.Grow(bEnums[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnums, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + signed := header.Signed() + for i, btfVal := range bEnums { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + value := 
uint64(btfVal.Val) + if signed { + // Sign extend values to 64 bit. + value = uint64(int32(btfVal.Val)) + } + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), signed, vals} + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + + case kindTypedef: + typedef := &Typedef{name, nil} + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage()} + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + vlen := header.Vlen() + bParams = slices.Grow(bParams[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err) + } + if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err) + } + + params := make([]FuncParam, 0, vlen) + for i, param := range bParams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(bParams[i].Type, ¶ms[i].Type) + } + + fp := &FuncProto{nil, params} + fixup(header.Type(), &fp.Return) + typ = fp + + case kindVar: + buf = buf[:btfVariableLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, 
VarLinkage(bVariable.Linkage)} + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + vlen := header.Vlen() + bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err) + } + + vars := make([]VarSecinfo, 0, vlen) + for _, btfVar := range bSecInfos { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } + for i := range vars { + fixup(bSecInfos[i].Type, &vars[i].Type) + } + typ = &Datasec{name, header.Size(), vars} + + case kindFloat: + typ = &Float{name, header.Size()} + + case kindDeclTag: + buf = buf[:btfDeclTagLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + if _, err := unmarshalBtfDeclTag(&bDeclTag, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + fixup(header.Type(), &dt.Type) + typ = dt + + declTags = append(declTags, dt) + + case kindTypeTag: + tt := &typeTag{nil, name} + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + vlen := header.Vlen() + bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err) + } + + 
vals := make([]EnumValue, 0, vlen) + for i, btfVal := range bEnums64 { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32) + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), header.Signed(), vals} + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + + types = append(types, typ) + } + + for _, fixup := range fixups { + if fixup.id < firstTypeID { + return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id) + } + + idx := int(fixup.id - firstTypeID) + if idx >= len(types) { + return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + + *fixup.typ = types[idx] + } + + for _, bitfieldFixup := range bitfieldFixups { + if bitfieldFixup.id < firstTypeID { + return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") + } + + data, ok := legacyBitfields[bitfieldFixup.id] + if ok { + // This is indeed a legacy bitfield, fix it up. 
+ bitfieldFixup.m.Offset += data[0] + bitfieldFixup.m.BitfieldSize = data[1] + } + } + + for _, dt := range declTags { + switch t := dt.Type.(type) { + case *Var, *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index) + } + + case composite: + if dt.Index >= len(t.members()) { + return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t) + } + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t) + } + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return types, nil +} + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. +func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +// As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs +// until it finds a T. 
+// +// Returns the zero value and false if there is no T or if the type is nested +// too deeply. +func As[T Type](typ Type) (T, bool) { + // NB: We can't make this function return (*T) since then + // we can't assert that a type matches an interface which + // embeds Type: as[composite](T). + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (typ).(type) { + case T: + return v, true + case qualifier: + typ = v.qualify() + case *Typedef: + typ = v.Type + default: + goto notFound + } + } +notFound: + var zero T + return zero, false +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + _, _ = io.WriteString(f, internal.GoTypeName(t)) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. + fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. 
+ return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go new file mode 100644 index 0000000000..12a89b87ee --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -0,0 +1,26 @@ +package btf + +// datasecResolveWorkaround ensures that certain vars in a Datasec are added +// to a Spec before the Datasec. This avoids a bug in kernel BTF validation. +// +// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/ +func datasecResolveWorkaround(b *Builder, ds *Datasec) error { + for _, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + continue + } + + switch v.Type.(type) { + case *Typedef, *Volatile, *Const, *Restrict, *typeTag: + // NB: We must never call Add on a Datasec, otherwise we risk + // infinite recursion. + _, err := b.Add(v.Type) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go new file mode 100644 index 0000000000..b2cb214adc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -0,0 +1,925 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kconfig" + "github.com/cilium/ebpf/internal/sysenc" +) + +// CollectionOptions control loading a collection into the kernel. 
+// +// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. +type CollectionOptions struct { + Maps MapOptions + Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map +} + +// CollectionSpec describes a collection. +type CollectionSpec struct { + Maps map[string]*MapSpec + Programs map[string]*ProgramSpec + + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder +} + +// Copy returns a recursive copy of the spec. +func (cs *CollectionSpec) Copy() *CollectionSpec { + if cs == nil { + return nil + } + + cpy := CollectionSpec{ + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + ByteOrder: cs.ByteOrder, + Types: cs.Types.Copy(), + } + + for name, spec := range cs.Maps { + cpy.Maps[name] = spec.Copy() + } + + for name, spec := range cs.Programs { + cpy.Programs[name] = spec.Copy() + } + + return &cpy +} + +// RewriteMaps replaces all references to specific maps. +// +// Use this function to use pre-existing maps instead of creating new ones +// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. +// +// Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. 
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { + for symbol, m := range maps { + // have we seen a program that uses this symbol / map + seen := false + for progName, progSpec := range cs.Programs { + err := progSpec.Instructions.AssociateMap(symbol, m) + + switch { + case err == nil: + seen = true + + case errors.Is(err, asm.ErrUnreferencedSymbol): + // Not all programs need to use the map + + default: + return fmt.Errorf("program %s: %w", progName, err) + } + } + + if !seen { + return fmt.Errorf("map %s not referenced by any programs", symbol) + } + + // Prevent NewCollection from creating rewritten maps + delete(cs.Maps, symbol) + } + + return nil +} + +// MissingConstantsError is returned by [CollectionSpec.RewriteConstants]. +type MissingConstantsError struct { + // The constants missing from .rodata. + Constants []string +} + +func (m *MissingConstantsError) Error() string { + return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", ")) +} + +// RewriteConstants replaces the value of multiple constants. +// +// The constant must be defined like so in the C program: +// +// volatile const type foobar; +// volatile const type foobar = default; +// +// Replacement values must be of the same length as the C sizeof(type). +// If necessary, they are marshalled according to the same rules as +// map values. +// +// From Linux 5.5 the verifier will use constants to eliminate dead code. +// +// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. +func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { + replaced := make(map[string]bool) + + for name, spec := range cs.Maps { + if !strings.HasPrefix(name, ".rodata") { + continue + } + + b, ds, err := spec.dataSection() + if errors.Is(err, errMapNoBTFValue) { + // Data sections without a BTF Datasec are valid, but don't support + // constant replacements. 
+ continue + } + if err != nil { + return fmt.Errorf("map %s: %w", name, err) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + for _, v := range ds.Vars { + vname := v.Type.TypeName() + replacement, ok := consts[vname] + if !ok { + continue + } + + if _, ok := v.Type.(*btf.Var); !ok { + return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname) + } + + if replaced[vname] { + return fmt.Errorf("section %s: duplicate variable %s", name, vname) + } + + if int(v.Offset+v.Size) > len(cpy) { + return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname) + } + + b, err := sysenc.Marshal(replacement, int(v.Size)) + if err != nil { + return fmt.Errorf("marshaling constant replacement %s: %w", vname, err) + } + + b.CopyTo(cpy[v.Offset : v.Offset+v.Size]) + + replaced[vname] = true + } + + spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy} + } + + var missing []string + for c := range consts { + if !replaced[c] { + missing = append(missing, c) + } + } + + if len(missing) != 0 { + return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing}) + } + + return nil +} + +// Assign the contents of a CollectionSpec to a struct. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. 
+// +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same MapSpec or ProgramSpec is assigned multiple times. +func (cs *CollectionSpec) Assign(to interface{}) error { + // Assign() only supports assigning ProgramSpecs and MapSpecs, + // so doesn't load any resources into the kernel. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*ProgramSpec)(nil)): + if p := cs.Programs[name]; p != nil { + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*MapSpec)(nil)): + if m := cs.Maps[name]; m != nil { + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + return assignValues(to, getValue) +} + +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. +// +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// opts may be nil. 
+// +// Returns an error if any of the fields can't be found, or +// if the same Map or Program is assigned multiple times. +func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() + + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true + return loader.loadProgram(name) + + case reflect.TypeOf((*Map)(nil)): + assignedMaps[name] = true + return loader.loadMap(name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateDeferredMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. 
+ if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + + return nil +} + +// Collection is a collection of Programs and Maps associated +// with their symbols +type Collection struct { + Programs map[string]*Program + Maps map[string]*Map +} + +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollection(spec *CollectionSpec) (*Collection, error) { + return NewCollectionWithOptions(spec, CollectionOptions{}) +} + +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() + + // Create maps first, as their fds need to be linked into programs. + for mapName := range spec.Maps { + if _, err := loader.loadMap(mapName); err != nil { + return nil, err + } + } + + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + + if _, err := loader.loadProgram(progName); err != nil { + return nil, err + } + } + + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. 
+ if err := loader.populateDeferredMaps(); err != nil { + return nil, err + } + + // Prevent loader.cleanup from closing maps and programs. + maps, progs := loader.maps, loader.programs + loader.maps, loader.programs = nil, nil + + return &Collection{ + progs, + maps, + }, nil +} + +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program +} + +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { + if opts == nil { + opts = &CollectionOptions{} + } + + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name, m := range opts.MapReplacements { + spec, ok := coll.Maps[name] + if !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) + } + } + + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + }, nil +} + +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Clone the map to avoid closing user's map later on. 
+ m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } + + // Finalize 'scalar' maps that don't refer to any other eBPF resources + // potentially pending creation. This is needed for frozen maps like .rodata + // that need to be finalized before invoking the verifier. + if !mapSpec.Type.canStoreMapOrProgram() { + if err := m.finalize(mapSpec); err != nil { + return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) + } + } + + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference() == "" { + continue + } + + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. 
+ if int32(ins.Constant) > 0 { + continue + } + + m, err := cl.loadMap(ins.Reference()) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + return prog, nil +} + +// populateDeferredMaps iterates maps holding programs or other maps and loads +// any dependencies. Populates all maps in cl and freezes them if specified. +func (cl *collectionLoader) populateDeferredMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + // Scalar maps without Map or Program references are finalized during + // creation. Don't finalize them again. + if !mapSpec.Type.canStoreMapOrProgram() { + continue + } + + mapSpec = mapSpec.Copy() + + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. + for i, kv := range mapSpec.Contents { + objName, ok := kv.Value.(string) + if !ok { + continue + } + + switch t := mapSpec.Type; { + case t.canStoreProgram(): + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case t.canStoreMap(): + // loadMap is idempotent and could return an existing Map. 
+ innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } + } + + return nil +} + +// resolveKconfig resolves all variables declared in .kconfig and populates +// m.Contents. Does nothing if the given m.Contents is non-empty. +func resolveKconfig(m *MapSpec) error { + ds, ok := m.Value.(*btf.Datasec) + if !ok { + return errors.New("map value is not a Datasec") + } + + type configInfo struct { + offset uint32 + typ btf.Type + } + + configs := make(map[string]configInfo) + + data := make([]byte, ds.Size) + for _, vsi := range ds.Vars { + v := vsi.Type.(*btf.Var) + n := v.TypeName() + + switch n { + case "LINUX_KERNEL_VERSION": + if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { + return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + } + + kv, err := internal.KernelVersion() + if err != nil { + return fmt.Errorf("getting kernel version: %w", err) + } + internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) + + case "LINUX_HAS_SYSCALL_WRAPPER": + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) + } + var value uint64 = 1 + if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { + value = 0 + } else if err != nil { + return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + default: // Catch CONFIG_*. + configs[n] = configInfo{ + offset: vsi.Offset, + typ: v.Type, + } + } + } + + // We only parse kconfig file if a CONFIG_* variable was found. 
+ if len(configs) > 0 { + f, err := kconfig.Find() + if err != nil { + return fmt.Errorf("cannot find a kconfig file: %w", err) + } + defer f.Close() + + filter := make(map[string]struct{}, len(configs)) + for config := range configs { + filter[config] = struct{}{} + } + + kernelConfig, err := kconfig.Parse(f, filter) + if err != nil { + return fmt.Errorf("cannot parse kconfig file: %w", err) + } + + for n, info := range configs { + value, ok := kernelConfig[n] + if !ok { + return fmt.Errorf("config option %q does not exists for this kernel", n) + } + + err := kconfig.PutValue(data[info.offset:], info.typ, value) + if err != nil { + return fmt.Errorf("problem adding value for %s: %w", n, err) + } + } + } + + m.Contents = []MapKV{{uint32(0), data}} + + return nil +} + +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func LoadCollection(file string) (*Collection, error) { + spec, err := LoadCollectionSpec(file) + if err != nil { + return nil, err + } + return NewCollection(spec) +} + +// Assign the contents of a Collection to a struct. +// +// This function bridges functionality between bpf2go generated +// code and any functionality better implemented in Collection. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Map or Program is assigned multiple times. 
+// +// Ownership and Close()ing responsibility is transferred to `to` +// for any successful assigns. On error `to` is left in an undefined state. +func (coll *Collection) Assign(to interface{}) error { + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + + // Assign() only transfers already-loaded Maps and Programs. No extra + // loading is done. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + if p := coll.Programs[name]; p != nil { + assignedProgs[name] = true + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*Map)(nil)): + if m := coll.Maps[name]; m != nil { + assignedMaps[name] = true + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + if err := assignValues(to, getValue); err != nil { + return err + } + + // Finalize ownership transfer + for p := range assignedProgs { + delete(coll.Programs, p) + } + for m := range assignedMaps { + delete(coll.Maps, m) + } + + return nil +} + +// Close frees all maps and programs associated with the collection. +// +// The collection mustn't be used afterwards. +func (coll *Collection) Close() { + for _, prog := range coll.Programs { + prog.Close() + } + for _, m := range coll.Maps { + m.Close() + } +} + +// DetachMap removes the named map from the Collection. +// +// This means that a later call to Close() will not affect this map. +// +// Returns nil if no map of that name exists. +func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. 
+func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} + +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} + +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. +func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) + } + + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) + } + + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) + } + + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} + + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue + } + + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { + continue + } + + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } + + // Obtain the destination type of the pointer. + v = field.value.Elem() + + case reflect.Struct: + // Reference the value's type directly. + v = field.value + + default: + continue + } + + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + + fields = append(fields, inner...) + } + + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. 
+// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + + toValue := reflect.ValueOf(to) + if toValue.Type().Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer to struct", to) + } + + if toValue.IsNil() { + return fmt.Errorf("nil pointer to %T", to) + } + + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { + return err + } + + type elem struct { + // Either *Map or *Program + typ reflect.Type + name string + } + + assigned := make(map[elem]string) + for _, field := range fields { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { + return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) + } + + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) + } + + // Get the eBPF object referred to by the tag. 
+ value, err := getValue(field.Type, tag) + if err != nil { + return fmt.Errorf("field %s: %w", field.Name, err) + } + + if !field.value.CanSet() { + return fmt.Errorf("field %s: can't set value", field.Name) + } + field.value.Set(reflect.ValueOf(value)) + + assigned[e] = field.Name + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go new file mode 100644 index 0000000000..07e959efdc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu.go @@ -0,0 +1,66 @@ +package ebpf + +import ( + "fmt" + "os" + "strings" + "sync" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return parseCPUsFromFile("/sys/devices/system/cpu/possible") +}) + +// PossibleCPU returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPU() (int, error) { + return possibleCPU() +} + +// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if +// the error is non-nil. +func MustPossibleCPU() int { + cpus, err := PossibleCPU() + if err != nil { + panic(err) + } + return cpus +} + +func parseCPUsFromFile(path string) (int, error) { + spec, err := os.ReadFile(path) + if err != nil { + return 0, err + } + + n, err := parseCPUs(string(spec)) + if err != nil { + return 0, fmt.Errorf("can't parse %s: %v", path, err) + } + + return n, nil +} + +// parseCPUs parses the number of cpus from a string produced +// by bitmap_list_string() in the Linux kernel. +// Multiple ranges are rejected, since they can't be unified +// into a single number. +// This is the format of /sys/devices/system/cpu/possible, it +// is not suitable for /sys/devices/system/cpu/online, etc. 
+func parseCPUs(spec string) (int, error) { + if strings.Trim(spec, "\n") == "0" { + return 1, nil + } + + var low, high int + n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) + if n != 2 || err != nil { + return 0, fmt.Errorf("invalid format: %s", spec) + } + if low != 0 { + return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) + } + + // cpus is 0 indexed + return high + 1, nil +} diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 0000000000..396b3394d3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,25 @@ +// Package ebpf is a toolkit for working with eBPF programs. +// +// eBPF programs are small snippets of code which are executed directly +// in a VM in the Linux kernel, which makes them very fast and flexible. +// Many Linux subsystems now accept eBPF programs. This makes it possible +// to implement highly application specific logic inside the kernel, +// without having to modify the actual kernel itself. +// +// This package is designed for long-running processes which +// want to use eBPF to implement part of their application logic. It has no +// run-time dependencies outside of the library and the Linux kernel itself. +// eBPF code should be compiled ahead of time using clang, and shipped with +// your application as any other resource. +// +// Use the link subpackage to attach a loaded program to a hook in the kernel. +// +// Note that losing all references to Map and Program resources will cause +// their underlying file descriptors to be closed, potentially removing those +// objects from the kernel. Always retain a reference by e.g. deferring a +// Close() of a Collection or LoadAndAssign object until application exit. +// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. 
+package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 0000000000..620037d80a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,1337 @@ +package ebpf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +type kconfigMetaKey struct{} + +type kconfigMeta struct { + Map *MapSpec + Offset uint32 +} + +type kfuncMetaKey struct{} + +type kfuncMeta struct { + Binding elf.SymBind + Func *btf.Func +} + +// elfCode is a convenience to reduce the amount of arguments that have to +// be passed around explicitly. You should treat its contents as immutable. +type elfCode struct { + *internal.SafeELFFile + sections map[elf.SectionIndex]*elfSection + license string + version uint32 + btf *btf.Spec + extInfo *btf.ExtInfos + maps map[string]*MapSpec + kfuncs map[string]*btf.Func + kconfig *MapSpec +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + if err != nil { + return nil, fmt.Errorf("file %s: %w", file, err) + } + return spec, nil +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. +func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { + f, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + + // Checks if the ELF file is for BPF data. + // Old LLVM versions set e_machine to EM_NONE. 
+ if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine) + } + + var ( + licenseSection *elf.Section + versionSection *elf.Section + sections = make(map[elf.SectionIndex]*elfSection) + relSections = make(map[elf.SectionIndex]*elf.Section) + ) + + // This is the target of relocations generated by inline assembly. + sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) + + // Collect all the sections we're interested in. This includes relocations + // which we parse later. + // + // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date. + for i, sec := range f.Sections { + idx := elf.SectionIndex(i) + + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + sections[idx] = newElfSection(sec, mapSection) + case sec.Name == ".maps": + sections[idx] = newElfSection(sec, btfMapSection) + case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): + sections[idx] = newElfSection(sec, dataSection) + case sec.Type == elf.SHT_REL: + // Store relocations under the section index of the target + relSections[elf.SectionIndex(sec.Info)] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + sections[idx] = newElfSection(sec, programSection) + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, fmt.Errorf("load license: %w", err) + } + + version, err := loadVersion(versionSection, f.ByteOrder) + if err != nil { + return nil, fmt.Errorf("load version: %w", err) + } + + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) + if err != nil && !errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + ec := &elfCode{ + SafeELFFile: f, + sections: sections, + license: 
license, + version: version, + btf: btfSpec, + extInfo: btfExtInfo, + maps: make(map[string]*MapSpec), + kfuncs: make(map[string]*btf.Func), + } + + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + ec.assignSymbols(symbols) + + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) + } + + if err := ec.loadMaps(); err != nil { + return nil, fmt.Errorf("load maps: %w", err) + } + + if err := ec.loadBTFMaps(); err != nil { + return nil, fmt.Errorf("load BTF maps: %w", err) + } + + if err := ec.loadDataSections(); err != nil { + return nil, fmt.Errorf("load data sections: %w", err) + } + + if err := ec.loadKconfigSection(); err != nil { + return nil, fmt.Errorf("load virtual .kconfig section: %w", err) + } + + if err := ec.loadKsymsSection(); err != nil { + return nil, fmt.Errorf("load virtual .ksyms section: %w", err) + } + + // Finally, collect programs and link them. + progs, err := ec.loadProgramSections() + if err != nil { + return nil, fmt.Errorf("load programs: %w", err) + } + + return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", nil + } + + data, err := sec.Data() + if err != nil { + return "", fmt.Errorf("section %s: %v", sec.Name, err) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + if err := binary.Read(sec.Open(), bo, &version); err != nil { + return 0, fmt.Errorf("section %s: %v", sec.Name, err) + } + return version, nil +} + +type elfSectionKind int + +const ( + undefSection elfSectionKind = iota + mapSection + btfMapSection + programSection + dataSection +) + +type elfSection struct { + *elf.Section + kind elfSectionKind + // Offset from the start of the section to a symbol + symbols 
map[uint64]elf.Symbol + // Offset from the start of the section to a relocation, which points at + // a symbol in another section. + relocations map[uint64]elf.Symbol + // The number of relocations pointing at this section. + references int +} + +func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { + return &elfSection{ + section, + kind, + make(map[uint64]elf.Symbol), + make(map[uint64]elf.Symbol), + 0, + } +} + +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. 
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. + var export []string + for _, sec := range ec.sections { + if sec.kind != programSection { + continue + } + + if len(sec.symbols) == 0 { + return nil, fmt.Errorf("section %v: missing symbols", sec.Name) + } + + funcs, err := ec.loadFunctions(sec) + if err != nil { + return nil, fmt.Errorf("section %v: %w", sec.Name, err) + } + + progType, attachType, progFlags, attachTo := getProgType(sec.Name) + + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } + + // Function names must be unique within a single ELF blob. 
+ if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) + } + progs[name] = spec + + if spec.SectionName != ".text" { + export = append(export, name) + } + } + } + + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) + } + } + + return progs, nil +} + +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) + if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. 
+ if rel, ok := section.relocations[offset]; ok { + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) + } + } + } + + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) + } + + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. 
+ dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) +} + +var errUnsupportedBinding = errors.New("unsupported binding") + +func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { + var ( + typ = elf.ST_TYPE(rel.Info) + bind = elf.ST_BIND(rel.Info) + name = rel.Name + ) + + target := ec.sections[rel.Section] + + switch target.kind { + case mapSection, btfMapSection: + if bind == elf.STB_LOCAL { + return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) + } + + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { + // STT_NOTYPE is generated on clang < 8 which doesn't tag + // relocations appropriately. + return fmt.Errorf("map load: incorrect relocation type %v", typ) + } + + ins.Src = asm.PseudoMapFD + + case dataSection: + var offset uint32 + switch typ { + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // This is really a reference to a static symbol, which clang doesn't + // emit a symbol table entry for. Instead it encodes the offset in + // the instruction itself. + offset = uint32(uint64(ins.Constant)) + + case elf.STT_OBJECT: + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. 
+ if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + default: + return fmt.Errorf("incorrect relocation type %v for direct map load", typ) + } + + // We rely on using the name of the data section as the reference. It + // would be nicer to keep the real name in case of an STT_OBJECT, but + // it's not clear how to encode that into Instruction. + name = target.Name + + // The kernel expects the offset in the second basic BPF instruction. + ins.Constant = int64(uint64(offset) << 32) + ins.Src = asm.PseudoMapValue + + case programSection: + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } + + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. 
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. + + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) + } + + sym, ok := target.symbols[uint64(ins.Constant)] + if !ok { + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) + } + + name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc + + default: + return fmt.Errorf("neither a call nor a load instruction: %v", ins) + } + + // The Undefined section is used for 'virtual' symbols that aren't backed by + // an ELF section. This includes symbol references from inline asm, forward + // function declarations, as well as extern kfunc declarations using __ksym + // and extern kconfig variables declared using __kconfig. + case undefSection: + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_NOTYPE { + return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) + } + + kf := ec.kfuncs[name] + switch { + // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name + // that matches the symbol name we mark the instruction as a referencing a kfunc. 
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call: + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Src = asm.PseudoKfuncCall + ins.Constant = -1 + + case kf != nil && ins.OpCode.IsDWordLoad(): + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Constant = 0 + + // If no kconfig map is found, this must be a symbol reference from inline + // asm (see testdata/loader.c:asm_relocation()) or a call to a forward + // function declaration (see testdata/fwd_decl.c). Don't interfere, These + // remain standard symbol references. + // extern __kconfig reads are represented as dword loads that need to be + // rewritten to pseudo map loads from .kconfig. If the map is present, + // require it to contain the symbol to disambiguate between inline asm + // relos and kconfigs. + case ec.kconfig != nil && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars { + if vsi.Type.(*btf.Var).Name != rel.Name { + continue + } + + ins.Src = asm.PseudoMapValue + ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset}) + return nil + } + + return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name) + } + + default: + return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) + } + + *ins = ins.WithReference(name) + return nil +} + +func (ec *elfCode) loadMaps() error { + for _, sec := range ec.sections { + if sec.kind != mapSection { + continue + } + + nSym := len(sec.symbols) + if nSym == 0 { + return fmt.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(nSym) != 0 { + return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + var ( + r = bufio.NewReader(sec.Open()) + size = sec.Size / uint64(nSym) + ) + for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { + 
mapSym, ok := sec.symbols[offset] + if !ok { + return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + mapName := mapSym.Name + if ec.maps[mapName] != nil { + return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + spec := MapSpec{ + Name: SanitizeName(mapName, -1), + } + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return fmt.Errorf("map %s: missing type", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", mapName) + } + + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = bytes.NewReader(extra) + } + + ec.maps[mapName] = &spec + } + } + + return nil +} + +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. Dump the .maps section and +// any relocations with `readelf -x .maps -r `. +func (ec *elfCode) loadBTFMaps() error { + for _, sec := range ec.sections { + if sec.kind != btfMapSection { + continue + } + + if ec.btf == nil { + return fmt.Errorf("missing BTF") + } + + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { + return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) + } + + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. 
For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. + v, ok := vs.Type.(*btf.Var) + if !ok { + return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) + } + name := string(v.Name) + + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. + _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + + if ec.maps[name] != nil { + return fmt.Errorf("section %v: map %s already exists", sec.Name, name) + } + + // Each Var representing a BTF map definition contains a Struct. + mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct) + if !ok { + return fmt.Errorf("expected struct, got %s", v.Type) + } + + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) + if err != nil { + return fmt.Errorf("map %v: %w", name, err) + } + + ec.maps[name] = mapSpec + } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } + } + + return nil +} + +// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing +// a BTF map definition. 
The name and spec arguments will be copied to the +// resulting MapSpec, and inner must be true on any recursive invocations. +func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { + var ( + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error + ) + + for i, member := range def.Members { + switch member.Name { + case "type": + mt, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get type: %w", err) + } + mapType = MapType(mt) + + case "map_flags": + flags, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map flags: %w", err) + } + + case "max_entries": + maxEntries, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map max entries: %w", err) + } + + case "key": + if keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + pk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("key type is not a pointer: %T", member.Type) + } + + key = pk.Target + + size, err := btf.Sizeof(pk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF key: %w", err) + } + + keySize = uint32(size) + + case "value": + if valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + vk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) + } + + value = vk.Target + + size, err := btf.Sizeof(vk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF value: %w", err) + } + + valueSize = uint32(size) + + case "key_size": + // Key needs to be nil and keySize needs to be 0 for key_size to be + // considered a valid member. 
+ if key != nil || keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + keySize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF key size: %w", err) + } + + case "value_size": + // Value needs to be nil and valueSize needs to be 0 for value_size to be + // considered a valid member. + if value != nil || valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + valueSize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF value size: %w", err) + } + + case "pinning": + if inner { + return nil, errors.New("inner maps can't be pinned") + } + + pinning, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get pinning: %w", err) + } + + pinType = PinType(pinning) + + case "values": + // The 'values' field in BTF map definitions is used for declaring map + // value types that are references to other BPF objects, like other maps + // or programs. It is always expected to be an array of pointers. + if i != len(def.Members)-1 { + return nil, errors.New("'values' must be the last member in a BTF map definition") + } + + if valueSize != 0 && valueSize != 4 { + return nil, errors.New("value_size must be 0 or 4") + } + valueSize = 4 + + valueType, err := resolveBTFArrayMacro(member.Type) + if err != nil { + return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) + } + + switch t := valueType.(type) { + case *btf.Struct: + // The values member pointing to an array of structs means we're expecting + // a map-in-map declaration. + if mapType != ArrayOfMaps && mapType != HashOfMaps { + return nil, errors.New("outer map needs to be an array or a hash of maps") + } + if inner { + return nil, fmt.Errorf("nested inner maps are not supported") + } + + // This inner map spec is used as a map template, but it needs to be + // created as a traditional map before it can be used to do so. 
+ // libbpf names the inner map template '.inner', but we + // opted for _inner to simplify validation logic. (dots only supported + // on kernels 5.2 and up) + // Pass the BTF spec from the parent object, since both parent and + // child must be created from the same BTF blob (on kernels that support BTF). + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) + if err != nil { + return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) + } + + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. + if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + + default: + return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) + } + + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + + case "map_extra": + return nil, fmt.Errorf("BTF map definition: field %s: %w", member.Name, ErrNotSupported) + + default: + return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) + } + } + + return &MapSpec{ + Name: SanitizeName(name, -1), + Type: MapType(mapType), + KeySize: keySize, + ValueSize: valueSize, + MaxEntries: maxEntries, + Flags: flags, + Key: key, + Value: value, + Pinning: pinType, + InnerMap: innerMapSpec, + Contents: contents, + }, nil +} + +// uintFromBTF resolves the __uint macro, which is a pointer to a sized +// array, e.g. for int (*foo)[10], this function will return 10. +func uintFromBTF(typ btf.Type) (uint32, error) { + ptr, ok := typ.(*btf.Pointer) + if !ok { + return 0, fmt.Errorf("not a pointer: %v", typ) + } + + arr, ok := ptr.Target.(*btf.Array) + if !ok { + return 0, fmt.Errorf("not a pointer to array: %v", typ) + } + + return arr.Nelems, nil +} + +// resolveBTFArrayMacro resolves the __array macro, which declares an array +// of pointers to a given type. 
This function returns the target Type of +// the pointers in the array. +func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { + arr, ok := typ.(*btf.Array) + if !ok { + return nil, fmt.Errorf("not an array: %v", typ) + } + + ptr, ok := arr.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("not an array of pointers: %v", typ) + } + + return ptr.Target, nil +} + +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. + // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := member.Offset.Bytes() + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. 
+ for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. + switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), r.Name}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), r.Name}) + default: + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) + } + } + + return contents, nil +} + +func (ec *elfCode) loadDataSections() error { + for _, sec := range ec.sections { + if sec.kind != dataSection { + continue + } + + if sec.references == 0 { + // Prune data sections which are not referenced by any + // instructions. + continue + } + + mapSpec := &MapSpec{ + Name: SanitizeName(sec.Name, -1), + Type: Array, + KeySize: 4, + ValueSize: uint32(sec.Size), + MaxEntries: 1, + } + + switch sec.Type { + // Only open the section if we know there's actual data to be read. + case elf.SHT_PROGBITS: + data, err := sec.Data() + if err != nil { + return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + } + + if uint64(len(data)) > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + } + mapSpec.Contents = []MapKV{{uint32(0), data}} + + case elf.SHT_NOBITS: + // NOBITS sections like .bss contain only zeroes, and since data sections + // are Arrays, the kernel already preallocates them. Skip reading zeroes + // from the ELF. 
+ default: + return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) + } + + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. + mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + } + } + + if strings.HasPrefix(sec.Name, ".rodata") { + mapSpec.Flags = unix.BPF_F_RDONLY_PROG + mapSpec.Freeze = true + } + + ec.maps[sec.Name] = mapSpec + } + + return nil +} + +// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't +// have a corresponding ELF section and exist purely in BTF. +func (ec *elfCode) loadKconfigSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".kconfig", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + if ds.Size == 0 { + return errors.New("zero-length .kconfig") + } + + ec.kconfig = &MapSpec{ + Name: ".kconfig", + Type: Array, + KeySize: uint32(4), + ValueSize: ds.Size, + MaxEntries: 1, + Flags: unix.BPF_F_RDONLY_PROG, + Freeze: true, + Key: &btf.Int{Size: 4}, + Value: ds, + } + + return nil +} + +// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't +// have a corresponding ELF section and exist purely in BTF. +func (ec *elfCode) loadKsymsSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".ksyms", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + for _, v := range ds.Vars { + // we have already checked the .ksyms Datasec to only contain Func Vars. 
+ ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func) + } + + return nil +} + +type libbpfElfSectionDef struct { + pattern string + programType sys.ProgType + attachType sys.AttachType + flags libbpfElfSectionFlag +} + +type libbpfElfSectionFlag uint32 + +// The values correspond to enum sec_def_flags in libbpf. +const ( + _SEC_NONE libbpfElfSectionFlag = 0 + + _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1) + _SEC_ATTACHABLE + _SEC_ATTACH_BTF + _SEC_SLEEPABLE + _SEC_XDP_FRAGS + _SEC_USDT + + // Ignore any present extra in order to preserve backwards compatibility + // with earlier versions of the library. + ignoreExtra + + _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT +) + +func init() { + // Compatibility with older versions of the library. + // We prepend libbpf definitions since they contain a prefix match + // for "xdp". + elfSectionDefs = append([]libbpfElfSectionDef{ + {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra}, + {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0}, + {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0}, + // This has been in the library since the beginning of time. Not sure + // where it came from. + {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + }, elfSectionDefs...) +} + +func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { + // Skip optional program marking for now. 
+ sectionName = strings.TrimPrefix(sectionName, "?") + + for _, t := range elfSectionDefs { + extra, ok := matchSectionName(sectionName, t.pattern) + if !ok { + continue + } + + programType := ProgramType(t.programType) + attachType := AttachType(t.attachType) + + var flags uint32 + if t.flags&_SEC_SLEEPABLE > 0 { + flags |= unix.BPF_F_SLEEPABLE + } + if t.flags&_SEC_XDP_FRAGS > 0 { + flags |= unix.BPF_F_XDP_HAS_FRAGS + } + if t.flags&_SEC_EXP_ATTACH_OPT > 0 { + if programType == XDP { + // The library doesn't yet have code to fallback to not specifying + // attach type. Only do this for XDP since we've enforced correct + // attach type for all other program types. + attachType = AttachNone + } + } + if t.flags&ignoreExtra > 0 { + extra = "" + } + + return programType, attachType, flags, extra + } + + return UnspecifiedProgram, AttachNone, 0, "" +} + +// matchSectionName checks a section name against a pattern. +// +// It's behaviour mirrors that of libbpf's sec_def_matches. +func matchSectionName(sectionName, pattern string) (extra string, found bool) { + have, extra, found := strings.Cut(sectionName, "/") + want := strings.TrimRight(pattern, "+/") + + if strings.HasSuffix(pattern, "/") { + // Section name must have a slash and extra may be empty. + return extra, have == want && found + } else if strings.HasSuffix(pattern, "+") { + // Section name may have a slash and extra may be empty. + return extra, have == want + } + + // Section name must have a prefix. extra is ignored. 
+ return "", strings.HasPrefix(sectionName, pattern) +} + +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { + rels := make(map[uint64]elf.Symbol) + + if sec.Entsize < 16 { + return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name) + } + + r := bufio.NewReader(sec.Open()) + for off := uint64(0); off < sec.Size; off += sec.Entsize { + ent := io.LimitReader(r, int64(sec.Entsize)) + + var rel elf.Rel64 + if binary.Read(ent, ec.ByteOrder, &rel) != nil { + return nil, fmt.Errorf("can't parse relocation at offset %v", off) + } + + symNo := int(elf.R_SYM64(rel.Info) - 1) + if symNo >= len(symbols) { + return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo) + } + + symbol := symbols[symNo] + rels[rel.Off] = symbol + } + + return rels, nil +} diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go new file mode 100644 index 0000000000..4b58251d9a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_sections.go @@ -0,0 +1,109 @@ +// Code generated by internal/cmd/gensections.awk; DO NOT EDIT. + +package ebpf + +// Code in this file is derived from libbpf, available under BSD-2-Clause. 
+ +import "github.com/cilium/ebpf/internal/sys" + +var elfSectionDefs = []libbpfElfSectionDef{ + {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE}, + {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE}, + {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, + {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE}, + {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE}, + 
{"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE}, + {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE}, + {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF}, + {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF}, + {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF}, + {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF}, + {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF}, + {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF}, + {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF}, + {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF}, + {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE}, + {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE}, + {"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp/cpumap", 
sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE}, + {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS}, + {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT}, + {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE}, + {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE}, + {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE}, + {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE}, + {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE}, + {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, + {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, + {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, + {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE}, + {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE}, + {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE}, + {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT}, + {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 
sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE}, + {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE}, + {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE}, + 
{"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT}, + {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE}, + {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE}, + {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE}, + {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE}, +} diff --git a/vendor/github.com/cilium/ebpf/features/doc.go b/vendor/github.com/cilium/ebpf/features/doc.go new file mode 100644 index 0000000000..acc57e3b1e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/doc.go @@ -0,0 +1,19 @@ +// Package features allows probing for BPF features available to the calling process. +// +// In general, the error return values from feature probes in this package +// all have the following semantics unless otherwise specified: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that resource +// creation may succeed despite an error being returned. For example, some +// map and program types cannot reliably be probed and will return an +// inconclusive error. +// +// As a rule, only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached by the library and persist throughout any changes +// to the process' environment, like capability changes. 
+package features diff --git a/vendor/github.com/cilium/ebpf/features/map.go b/vendor/github.com/cilium/ebpf/features/map.go new file mode 100644 index 0000000000..8923e736a1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/map.go @@ -0,0 +1,321 @@ +package features + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveMapType probes the running kernel for the availability of the specified map type. +// +// See the package documentation for the meaning of the error return value. +func HaveMapType(mt ebpf.MapType) error { + return haveMapTypeMatrix.Result(mt) +} + +func probeCgroupStorageMap(mt sys.MapType) error { + // keySize needs to be sizeof(struct{u32 + u64}) = 12 (+ padding = 16) + // by using unsafe.Sizeof(int) we are making sure that this works on 32bit and 64bit archs + return createMap(&sys.MapCreateAttr{ + MapType: mt, + ValueSize: 4, + KeySize: uint32(8 + unsafe.Sizeof(int(0))), + MaxEntries: 0, + }) +} + +func probeStorageMap(mt sys.MapType) error { + // maxEntries needs to be 0 + // BPF_F_NO_PREALLOC needs to be set + // btf* fields need to be set + // see alloc_check for local_storage map types + err := createMap(&sys.MapCreateAttr{ + MapType: mt, + KeySize: 4, + ValueSize: 4, + MaxEntries: 0, + MapFlags: unix.BPF_F_NO_PREALLOC, + BtfKeyTypeId: 1, + BtfValueTypeId: 1, + BtfFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + // Triggered by BtfFd. 
+ return nil + } + return err +} + +func probeNestedMap(mt sys.MapType) error { + // assign invalid innerMapFd to pass validation check + // will return EBADF + err := probeMap(&sys.MapCreateAttr{ + MapType: mt, + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + return nil + } + return err +} + +func probeMap(attr *sys.MapCreateAttr) error { + if attr.KeySize == 0 { + attr.KeySize = 4 + } + if attr.ValueSize == 0 { + attr.ValueSize = 4 + } + attr.MaxEntries = 1 + return createMap(attr) +} + +func createMap(attr *sys.MapCreateAttr) error { + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + return nil + } + + switch { + // EINVAL occurs when attempting to create a map with an unknown type. + // E2BIG occurs when MapCreateAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given map type. + case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + return ebpf.ErrNotSupported + } + + return err +} + +var haveMapTypeMatrix = internal.FeatureMatrix[ebpf.MapType]{ + ebpf.Hash: {Version: "3.19"}, + ebpf.Array: {Version: "3.19"}, + ebpf.ProgramArray: {Version: "4.2"}, + ebpf.PerfEventArray: {Version: "4.3"}, + ebpf.PerCPUHash: {Version: "4.6"}, + ebpf.PerCPUArray: {Version: "4.6"}, + ebpf.StackTrace: { + Version: "4.6", + Fn: func() error { + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK_TRACE, + ValueSize: 8, // sizeof(uint64) + }) + }, + }, + ebpf.CGroupArray: {Version: "4.8"}, + ebpf.LRUHash: {Version: "4.10"}, + ebpf.LRUCPUHash: {Version: "4.10"}, + ebpf.LPMTrie: { + Version: "4.11", + Fn: func() error { + // keySize and valueSize need to be sizeof(struct{u32 + u8}) + 1 + padding = 8 + // BPF_F_NO_PREALLOC needs to be set + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_LPM_TRIE, + KeySize: 8, + ValueSize: 8, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) + }, + }, + ebpf.ArrayOfMaps: { + Version: "4.12", + Fn: 
func() error { return probeNestedMap(sys.BPF_MAP_TYPE_ARRAY_OF_MAPS) }, + }, + ebpf.HashOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_HASH_OF_MAPS) }, + }, + ebpf.DevMap: {Version: "4.14"}, + ebpf.SockMap: {Version: "4.14"}, + ebpf.CPUMap: {Version: "4.15"}, + ebpf.XSKMap: {Version: "4.18"}, + ebpf.SockHash: {Version: "4.18"}, + ebpf.CGroupStorage: { + Version: "4.19", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_CGROUP_STORAGE) }, + }, + ebpf.ReusePortSockArray: {Version: "4.19"}, + ebpf.PerCPUCGroupStorage: { + Version: "4.20", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) }, + }, + ebpf.Queue: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_QUEUE, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.Stack: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.SkStorage: { + Version: "5.2", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_SK_STORAGE) }, + }, + ebpf.DevMapHash: {Version: "5.4"}, + ebpf.StructOpsMap: { + Version: "5.6", + Fn: func() error { + // StructOps requires setting a vmlinux type id, but id 1 will always + // resolve to some type of integer. This will cause ENOTSUPP. + err := probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STRUCT_OPS, + BtfVmlinuxValueTypeId: 1, + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the map type is at least known to the kernel. 
+ return nil + } + return err + }, + }, + ebpf.RingBuf: { + Version: "5.8", + Fn: func() error { + // keySize and valueSize need to be 0 + // maxEntries needs to be power of 2 and PAGE_ALIGNED + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_RINGBUF, + KeySize: 0, + ValueSize: 0, + MaxEntries: uint32(os.Getpagesize()), + }) + }, + }, + ebpf.InodeStorage: { + Version: "5.10", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_INODE_STORAGE) }, + }, + ebpf.TaskStorage: { + Version: "5.11", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_TASK_STORAGE) }, + }, +} + +func init() { + for mt, ft := range haveMapTypeMatrix { + ft.Name = mt.String() + if ft.Fn == nil { + // Avoid referring to the loop variable in the closure. + mt := sys.MapType(mt) + ft.Fn = func() error { return probeMap(&sys.MapCreateAttr{MapType: mt}) } + } + } +} + +// MapFlags document which flags may be feature probed. +type MapFlags = sys.MapFlags + +// Flags which may be feature probed. +const ( + BPF_F_NO_PREALLOC = sys.BPF_F_NO_PREALLOC + BPF_F_RDONLY_PROG = sys.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = sys.BPF_F_WRONLY_PROG + BPF_F_MMAPABLE = sys.BPF_F_MMAPABLE + BPF_F_INNER_MAP = sys.BPF_F_INNER_MAP +) + +// HaveMapFlag probes the running kernel for the availability of the specified map flag. +// +// Returns an error if flag is not one of the flags declared in this package. +// See the package documentation for the meaning of the error return value. +func HaveMapFlag(flag MapFlags) (err error) { + return haveMapFlagsMatrix.Result(flag) +} + +func probeMapFlag(attr *sys.MapCreateAttr) error { + // For now, we do not check if the map type is supported because we only support + // probing for flags defined on arrays and hashes that are always supported. + // In the future, if we allow probing on flags defined on newer types, checking for map type + // support will be required. 
+ if attr.MapType == sys.BPF_MAP_TYPE_UNSPEC { + attr.MapType = sys.BPF_MAP_TYPE_ARRAY + } + + attr.KeySize = 4 + attr.ValueSize = 4 + attr.MaxEntries = 1 + + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + } else if errors.Is(err, unix.EINVAL) { + // EINVAL occurs when attempting to create a map with an unknown type or an unknown flag. + err = ebpf.ErrNotSupported + } + + return err +} + +var haveMapFlagsMatrix = internal.FeatureMatrix[MapFlags]{ + BPF_F_NO_PREALLOC: { + Version: "4.6", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_HASH, + MapFlags: BPF_F_NO_PREALLOC, + }) + }, + }, + BPF_F_RDONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_RDONLY_PROG, + }) + }, + }, + BPF_F_WRONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_WRONLY_PROG, + }) + }, + }, + BPF_F_MMAPABLE: { + Version: "5.5", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_MMAPABLE, + }) + }, + }, + BPF_F_INNER_MAP: { + Version: "5.10", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_INNER_MAP, + }) + }, + }, +} + +func init() { + for mf, ft := range haveMapFlagsMatrix { + ft.Name = fmt.Sprint(mf) + } +} diff --git a/vendor/github.com/cilium/ebpf/features/misc.go b/vendor/github.com/cilium/ebpf/features/misc.go new file mode 100644 index 0000000000..6bd8df9332 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/misc.go @@ -0,0 +1,95 @@ +package features + +import ( + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// HaveLargeInstructions probes the running kernel if more than 4096 instructions +// per program are supported. +// +// Upstream commit c04c0d2b968a ("bpf: increase complexity limit and maximum program size"). 
+// +// See the package documentation for the meaning of the error return value. +func HaveLargeInstructions() error { + return haveLargeInstructions() +} + +var haveLargeInstructions = internal.NewFeatureTest(">4096 instructions", "5.2", func() error { + const maxInsns = 4096 + + insns := make(asm.Instructions, maxInsns, maxInsns+1) + for i := range insns { + insns[i] = asm.Mov.Imm(asm.R0, 1) + } + insns = append(insns, asm.Return()) + + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: insns, + }) +}) + +// HaveBoundedLoops probes the running kernel if bounded loops are supported. +// +// Upstream commit 2589726d12a1 ("bpf: introduce bounded loops"). +// +// See the package documentation for the meaning of the error return value. +func HaveBoundedLoops() error { + return haveBoundedLoops() +} + +var haveBoundedLoops = internal.NewFeatureTest("bounded loops", "5.3", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 10), + asm.Sub.Imm(asm.R0, 1).WithSymbol("loop"), + asm.JNE.Imm(asm.R0, 0, "loop"), + asm.Return(), + }, + }) +}) + +// HaveV2ISA probes the running kernel if instructions of the v2 ISA are supported. +// +// Upstream commit 92b31a9af73b ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions"). +// +// See the package documentation for the meaning of the error return value. +func HaveV2ISA() error { + return haveV2ISA() +} + +var haveV2ISA = internal.NewFeatureTest("v2 ISA", "4.14", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) +}) + +// HaveV3ISA probes the running kernel if instructions of the v3 ISA are supported. +// +// Upstream commit 092ed0968bb6 ("bpf: verifier support JMP32"). 
+// +// See the package documentation for the meaning of the error return value. +func HaveV3ISA() error { + return haveV3ISA() +} + +var haveV3ISA = internal.NewFeatureTest("v3 ISA", "5.1", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm32(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) +}) diff --git a/vendor/github.com/cilium/ebpf/features/prog.go b/vendor/github.com/cilium/ebpf/features/prog.go new file mode 100644 index 0000000000..dc13b86d3a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/prog.go @@ -0,0 +1,300 @@ +package features + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveProgType probes the running kernel for the availability of the specified program type. +// +// Deprecated: use HaveProgramType() instead. +var HaveProgType = HaveProgramType + +// HaveProgramType probes the running kernel for the availability of the specified program type. +// +// See the package documentation for the meaning of the error return value. +func HaveProgramType(pt ebpf.ProgramType) (err error) { + return haveProgramTypeMatrix.Result(pt) +} + +func probeProgram(spec *ebpf.ProgramSpec) error { + if spec.Instructions == nil { + spec.Instructions = asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + } + } + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EINVAL occurs when attempting to create a program with an unknown type. 
+ // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. + case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + err = ebpf.ErrNotSupported + } + + return err +} + +var haveProgramTypeMatrix = internal.FeatureMatrix[ebpf.ProgramType]{ + ebpf.SocketFilter: {Version: "3.19"}, + ebpf.Kprobe: {Version: "4.1"}, + ebpf.SchedCLS: {Version: "4.1"}, + ebpf.SchedACT: {Version: "4.1"}, + ebpf.TracePoint: {Version: "4.7"}, + ebpf.XDP: {Version: "4.8"}, + ebpf.PerfEvent: {Version: "4.9"}, + ebpf.CGroupSKB: {Version: "4.10"}, + ebpf.CGroupSock: {Version: "4.10"}, + ebpf.LWTIn: {Version: "4.10"}, + ebpf.LWTOut: {Version: "4.10"}, + ebpf.LWTXmit: {Version: "4.10"}, + ebpf.SockOps: {Version: "4.13"}, + ebpf.SkSKB: {Version: "4.14"}, + ebpf.CGroupDevice: {Version: "4.15"}, + ebpf.SkMsg: {Version: "4.17"}, + ebpf.RawTracepoint: {Version: "4.17"}, + ebpf.CGroupSockAddr: { + Version: "4.17", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockAddr, + AttachType: ebpf.AttachCGroupInet4Connect, + }) + }, + }, + ebpf.LWTSeg6Local: {Version: "4.18"}, + ebpf.LircMode2: {Version: "4.18"}, + ebpf.SkReuseport: {Version: "4.19"}, + ebpf.FlowDissector: {Version: "4.20"}, + ebpf.CGroupSysctl: {Version: "5.2"}, + ebpf.RawTracepointWritable: {Version: "5.2"}, + ebpf.CGroupSockopt: { + Version: "5.3", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockopt, + AttachType: ebpf.AttachCGroupGetsockopt, + }) + }, + }, + ebpf.Tracing: { + Version: "5.5", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Tracing, + AttachType: ebpf.AttachTraceFEntry, + AttachTo: "bpf_init", + }) + }, + }, + ebpf.StructOps: { + Version: "5.6", + Fn: func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.StructOps, + License: "GPL", + }) + if errors.Is(err, sys.ENOTSUPP) { + // 
ENOTSUPP means the program type is at least known to the kernel. + return nil + } + return err + }, + }, + ebpf.Extension: { + Version: "5.6", + Fn: func() error { + // create btf.Func to add to first ins of target and extension so both progs are btf powered + btfFn := btf.Func{ + Name: "a", + Type: &btf.FuncProto{ + Return: &btf.Int{}, + Params: []btf.FuncParam{ + {Name: "ctx", Type: &btf.Pointer{Target: &btf.Struct{Name: "xdp_md"}}}, + }, + }, + Linkage: btf.GlobalFunc, + } + insns := asm.Instructions{ + btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btfFn), + asm.Return(), + } + + // create target prog + prog, err := ebpf.NewProgramWithOptions( + &ebpf.ProgramSpec{ + Type: ebpf.XDP, + Instructions: insns, + }, + ebpf.ProgramOptions{ + LogDisabled: true, + }, + ) + if err != nil { + return err + } + defer prog.Close() + + // probe for Extension prog with target + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Extension, + Instructions: insns, + AttachTarget: prog, + AttachTo: btfFn.Name, + }) + }, + }, + ebpf.LSM: { + Version: "5.7", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.LSM, + AttachType: ebpf.AttachLSMMac, + AttachTo: "file_mprotect", + License: "GPL", + }) + }, + }, + ebpf.SkLookup: { + Version: "5.9", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SkLookup, + AttachType: ebpf.AttachSkLookup, + }) + }, + }, + ebpf.Syscall: { + Version: "5.14", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Syscall, + Flags: unix.BPF_F_SLEEPABLE, + }) + }, + }, +} + +func init() { + for key, ft := range haveProgramTypeMatrix { + ft.Name = key.String() + if ft.Fn == nil { + key := key // avoid the dreaded loop variable problem + ft.Fn = func() error { return probeProgram(&ebpf.ProgramSpec{Type: key}) } + } + } +} + +type helperKey struct { + typ ebpf.ProgramType + helper asm.BuiltinFunc +} + +var helperCache = internal.NewFeatureCache(func(key helperKey) *internal.FeatureTest { + 
return &internal.FeatureTest{ + Name: fmt.Sprintf("%s for program type %s", key.helper, key.typ), + Fn: func() error { + return haveProgramHelper(key.typ, key.helper) + }, + } +}) + +// HaveProgramHelper probes the running kernel for the availability of the specified helper +// function to a specified program type. +// Return values have the following semantics: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that program creation may +// succeed despite an error being returned. +// Only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached and persist throughout any process capability changes. +func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if helper > helper.Max() { + return os.ErrInvalid + } + + return helperCache.Result(helperKey{pt, helper}) +} + +func haveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if ok := helperProbeNotImplemented(pt); ok { + return fmt.Errorf("no feature probe for %v/%v", pt, helper) + } + + if err := HaveProgramType(pt); err != nil { + return err + } + + spec := &ebpf.ProgramSpec{ + Type: pt, + Instructions: asm.Instructions{ + helper.Call(), + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "GPL", + } + + switch pt { + case ebpf.CGroupSockAddr: + spec.AttachType = ebpf.AttachCGroupInet4Connect + case ebpf.CGroupSockopt: + spec.AttachType = ebpf.AttachCGroupGetsockopt + case ebpf.SkLookup: + spec.AttachType = ebpf.AttachSkLookup + case ebpf.Syscall: + spec.Flags = unix.BPF_F_SLEEPABLE + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EACCES occurs when attempting to create a program probe with a helper + // while the register 
args when calling this helper aren't set up properly. + // We interpret this as the helper being available, because the verifier + // returns EINVAL if the helper is not supported by the running kernel. + case errors.Is(err, unix.EACCES): + // TODO: possibly we need to check verifier output here to be sure + err = nil + + // EINVAL occurs when attempting to create a program with an unknown helper. + case errors.Is(err, unix.EINVAL): + // TODO: possibly we need to check verifier output here to be sure + err = ebpf.ErrNotSupported + } + + return err +} + +func helperProbeNotImplemented(pt ebpf.ProgramType) bool { + switch pt { + case ebpf.Extension, ebpf.LSM, ebpf.StructOps, ebpf.Tracing: + return true + } + return false +} diff --git a/vendor/github.com/cilium/ebpf/features/version.go b/vendor/github.com/cilium/ebpf/features/version.go new file mode 100644 index 0000000000..69e1c39c1a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/version.go @@ -0,0 +1,18 @@ +package features + +import "github.com/cilium/ebpf/internal" + +// LinuxVersionCode returns the version of the currently running kernel +// as defined in the LINUX_VERSION_CODE compile-time macro. It is represented +// in the format described by the KERNEL_VERSION macro from linux/version.h. +// +// Do not use the version to make assumptions about the presence of certain +// kernel features, always prefer feature probes in this package. Some +// distributions backport or disable eBPF features. 
+func LinuxVersionCode() (uint32, error) { + v, err := internal.KernelVersion() + if err != nil { + return 0, err + } + return v.Kernel(), nil +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go new file mode 100644 index 0000000000..04c60c64b8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/info.go @@ -0,0 +1,499 @@ +package ebpf + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// The *Info structs expose metadata about a program or map. Most +// fields are exposed via a getter: +// +// func (*MapInfo) ID() (MapID, bool) +// +// This is because the metadata available changes based on kernel version. +// The second boolean return value indicates whether a particular field is +// available on the current kernel. +// +// Always add new metadata as such a getter, unless you can somehow get the +// value of the field on all supported kernels. Also document which version +// a particular field first appeared in. +// +// Some metadata is a buffer which needs additional parsing. In this case, +// store the undecoded data in the Info struct and provide a getter which +// decodes it when necessary. See ProgramInfo.Instructions for an example. + +// MapInfo describes a map. +type MapInfo struct { + Type MapType + id MapID + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + Flags uint32 + // Name as supplied by user space at load time. Available from 4.15. 
+ Name string + + btf btf.ID +} + +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err := sys.ObjInfo(fd, &info) + if errors.Is(err, syscall.EINVAL) { + return newMapInfoFromProc(fd) + } + if err != nil { + return nil, err + } + + return &MapInfo{ + MapType(info.Type), + MapID(info.Id), + info.KeySize, + info.ValueSize, + info.MaxEntries, + uint32(info.MapFlags), + unix.ByteSliceToString(info.Name[:]), + btf.ID(info.BtfId), + }, nil +} + +func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { + var mi MapInfo + err := scanFdInfo(fd, map[string]interface{}{ + "map_type": &mi.Type, + "key_size": &mi.KeySize, + "value_size": &mi.ValueSize, + "max_entries": &mi.MaxEntries, + "map_flags": &mi.Flags, + }) + if err != nil { + return nil, err + } + return &mi, nil +} + +// ID returns the map ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) ID() (MapID, bool) { + return mi.id, mi.id > 0 +} + +// BTFID returns the BTF ID associated with the Map. +// +// The ID is only valid as long as the associated Map is kept alive. +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the Map was loaded without BTF information.) +func (mi *MapInfo) BTFID() (btf.ID, bool) { + return mi.btf, mi.btf > 0 +} + +// programStats holds statistics of a program. +type programStats struct { + // Total accumulated runtime of the program ins ns. + runtime time.Duration + // Total number of times the program was called. + runCount uint64 + // Total number of times the programm was NOT called. + // Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented"). + recursionMisses uint64 +} + +// ProgramInfo describes a program. 
+type ProgramInfo struct { + Type ProgramType + id ProgramID + // Truncated hash of the BPF bytecode. Available from 4.13. + Tag string + // Name as supplied by user space at load time. Available from 4.15. + Name string + + createdByUID uint32 + haveCreatedByUID bool + btf btf.ID + stats *programStats + + maps []MapID + insns []byte + + lineInfos []byte + numLineInfos uint32 + funcInfos []byte + numFuncInfos uint32 +} + +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err := sys.ObjInfo(fd, &info) + if errors.Is(err, syscall.EINVAL) { + return newProgramInfoFromProc(fd) + } + if err != nil { + return nil, err + } + + pi := ProgramInfo{ + Type: ProgramType(info.Type), + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + stats: &programStats{ + runtime: time.Duration(info.RunTimeNs), + runCount: info.RunCnt, + recursionMisses: info.RecursionMisses, + }, + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. + var info2 sys.ProgInfo + + makeSecondCall := false + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + makeSecondCall = true + } else if haveProgramInfoMapIDs() == nil { + // This program really has no associated maps. + pi.maps = make([]MapID, 0) + } else { + // The kernel doesn't report associated maps. + pi.maps = nil + } + + // createdByUID and NrMapIds were introduced in the same kernel version. 
+ if pi.maps != nil { + pi.createdByUID = info.CreatedByUid + pi.haveCreatedByUID = true + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + makeSecondCall = true + } + + if info.NrLineInfo > 0 { + pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) + info2.LineInfo = sys.NewSlicePointer(pi.lineInfos) + info2.LineInfoRecSize = btf.LineInfoSize + info2.NrLineInfo = info.NrLineInfo + pi.numLineInfos = info.NrLineInfo + makeSecondCall = true + } + + if info.NrFuncInfo > 0 { + pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) + info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos) + info2.FuncInfoRecSize = btf.FuncInfoSize + info2.NrFuncInfo = info.NrFuncInfo + pi.numFuncInfos = info.NrFuncInfo + makeSecondCall = true + } + + if makeSecondCall { + if err := sys.ObjInfo(fd, &info2); err != nil { + return nil, err + } + } + + return &pi, nil +} + +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { + var info ProgramInfo + err := scanFdInfo(fd, map[string]interface{}{ + "prog_type": &info.Type, + "prog_tag": &info.Tag, + }) + if errors.Is(err, errMissingFields) { + return nil, &internal.UnsupportedFeatureError{ + Name: "reading program info from /proc/self/fdinfo", + MinimumVersion: internal.Version{4, 10, 0}, + } + } + if err != nil { + return nil, err + } + + return &info, nil +} + +// ID returns the program ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) ID() (ProgramID, bool) { + return pi.id, pi.id > 0 +} + +// CreatedByUID returns the Uid that created the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. 
+func (pi *ProgramInfo) CreatedByUID() (uint32, bool) { + return pi.createdByUID, pi.haveCreatedByUID +} + +// BTFID returns the BTF ID associated with the program. +// +// The ID is only valid as long as the associated program is kept alive. +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) +func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + +// RunCount returns the total number of times the program was called. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) RunCount() (uint64, bool) { + if pi.stats != nil { + return pi.stats.runCount, true + } + return 0, false +} + +// Runtime returns the total accumulated runtime of the program. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Runtime() (time.Duration, bool) { + if pi.stats != nil { + return pi.stats.runtime, true + } + return time.Duration(0), false +} + +// RecursionMisses returns the total number of times the program was NOT called. +// This can happen when another bpf program is already running on the cpu, which +// is likely to happen for example when you interrupt bpf program execution. +func (pi *ProgramInfo) RecursionMisses() (uint64, bool) { + if pi.stats != nil { + return pi.stats.recursionMisses, true + } + return 0, false +} + +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. 
These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// If available, the instructions will be annotated with metadata from the +// BTF. This includes line information and function information. Reading +// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is +// unavailable, the instructions will be returned without metadata. +// +// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. +// Requires CAP_SYS_ADMIN for instructions with metadata. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + if pi.btf != 0 { + btfh, err := btf.NewHandleFromID(pi.btf) + if err != nil { + // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM. + // Ignore it and fall back to instructions without metadata. + if !errors.Is(err, unix.EPERM) { + return nil, fmt.Errorf("unable to get BTF handle: %w", err) + } + } + + // If we have a BTF handle, we can use it to assign metadata to the instructions. 
+ if btfh != nil { + defer btfh.Close() + + spec, err := btfh.Spec(nil) + if err != nil { + return nil, fmt.Errorf("unable to get BTF spec: %w", err) + } + + lineInfos, err := btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse line info: %w", err) + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse func info: %w", err) + } + + btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + } + } + + fn := btf.FuncMetadata(&insns[0]) + name := pi.Name + if fn != nil { + name = fn.Name + } + insns[0] = insns[0].WithSymbol(name) + + return insns, nil +} + +// MapIDs returns the maps related to the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.maps, pi.maps != nil +} + +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) + if err != nil { + return err + } + defer fh.Close() + + if err := scanFdInfoReader(fh, fields); err != nil { + return fmt.Errorf("%s: %w", fh.Name(), err) + } + return nil +} + +var errMissingFields = errors.New("missing fields") + +func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { + var ( + scanner = bufio.NewScanner(r) + scanned int + ) + + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "\t", 2) + if len(parts) != 2 { + continue + } + + name := strings.TrimSuffix(parts[0], ":") + field, ok := fields[string(name)] + if !ok { + continue + } + + if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", name, err) + } + + scanned++ + } + + if err := scanner.Err(); err != nil { + 
return err + } + + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + + if scanned != len(fields) { + return errMissingFields + } + + return nil +} + +// EnableStats starts the measuring of the runtime +// and run counts of eBPF programs. +// +// Collecting statistics can have an impact on the performance. +// +// Requires at least 5.8. +func EnableStats(which uint32) (io.Closer, error) { + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) + if err != nil { + return nil, err + } + return fd, nil +} + +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error { + prog, err := progLoad(asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, SocketFilter, "MIT") + if err != nil { + return err + } + defer prog.Close() + + err = sys.ObjInfo(prog, &sys.ProgInfo{ + // NB: Don't need to allocate MapIds since the program isn't using + // any maps. + NrMapIds: 1, + }) + if errors.Is(err, unix.EINVAL) { + // Most likely the syscall doesn't exist. + return internal.ErrNotSupported + } + if errors.Is(err, unix.E2BIG) { + // We've hit check_uarg_tail_zero on older kernels. 
+ return internal.ErrNotSupported + } + + return err +}) diff --git a/vendor/github.com/cilium/ebpf/internal/auxv.go b/vendor/github.com/cilium/ebpf/internal/auxv.go new file mode 100644 index 0000000000..45fd0d37f1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/auxv.go @@ -0,0 +1,60 @@ +package internal + +import ( + "errors" + "io" + _ "unsafe" +) + +type auxvPairReader interface { + Close() error + ReadAuxvPair() (uint64, uint64, error) +} + +// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h +const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +type auxvRuntimeReader struct { + data []uintptr + index int +} + +func (r *auxvRuntimeReader) Close() error { + return nil +} + +func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) { + if r.index >= len(r.data)+2 { + return 0, 0, io.EOF + } + + // we manually add the (_AT_NULL, _AT_NULL) pair at the end + // that is not provided by the go runtime + var tag, value uintptr + if r.index+1 < len(r.data) { + tag, value = r.data[r.index], r.data[r.index+1] + } else { + tag, value = _AT_NULL, _AT_NULL + } + r.index += 2 + return uint64(tag), uint64(value), nil +} + +func newAuxvRuntimeReader() (auxvPairReader, error) { + data := runtime_getAuxv() + + if len(data)%2 != 0 { + return nil, errors.New("malformed auxv passed from runtime") + } + + return &auxvRuntimeReader{ + data: data, + index: 0, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go new file mode 100644 index 0000000000..81c6544330 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/buffer.go @@ -0,0 +1,31 @@ +package internal + +import ( + "bytes" + "sync" +) + +var bytesBufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// NewBuffer retrieves a 
[bytes.Buffer] from a pool an re-initialises it. +// +// The returned buffer should be passed to [PutBuffer]. +func NewBuffer(buf []byte) *bytes.Buffer { + wr := bytesBufferPool.Get().(*bytes.Buffer) + // Reinitialize the Buffer with a new backing slice since it is returned to + // the caller by wr.Bytes() below. Pooling is faster despite calling + // NewBuffer. The pooled alloc is still reused, it only needs to be zeroed. + *wr = *bytes.NewBuffer(buf) + return wr +} + +// PutBuffer releases a buffer to the pool. +func PutBuffer(buf *bytes.Buffer) { + // Release reference to the backing buffer. + *buf = *bytes.NewBuffer(nil) + bytesBufferPool.Put(buf) +} diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go new file mode 100644 index 0000000000..e3a3050215 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/deque.go @@ -0,0 +1,91 @@ +package internal + +import "math/bits" + +// Deque implements a double ended queue. +type Deque[T any] struct { + elems []T + read, write uint64 + mask uint64 +} + +// Reset clears the contents of the deque while retaining the backing buffer. +func (dq *Deque[T]) Reset() { + var zero T + + for i := dq.read; i < dq.write; i++ { + dq.elems[i&dq.mask] = zero + } + + dq.read, dq.write = 0, 0 +} + +func (dq *Deque[T]) Empty() bool { + return dq.read == dq.write +} + +// Push adds an element to the end. +func (dq *Deque[T]) Push(e T) { + dq.Grow(1) + dq.elems[dq.write&dq.mask] = e + dq.write++ +} + +// Shift returns the first element or the zero value. +func (dq *Deque[T]) Shift() T { + var zero T + + if dq.Empty() { + return zero + } + + index := dq.read & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + dq.read++ + return t +} + +// Pop returns the last element or the zero value. 
+func (dq *Deque[T]) Pop() T { + var zero T + + if dq.Empty() { + return zero + } + + dq.write-- + index := dq.write & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + return t +} + +// Grow the deque's capacity, if necessary, to guarantee space for another n +// elements. +func (dq *Deque[T]) Grow(n int) { + have := dq.write - dq.read + need := have + uint64(n) + if need < have { + panic("overflow") + } + if uint64(len(dq.elems)) >= need { + return + } + + // Round up to the new power of two which is at least 8. + // See https://jameshfisher.com/2018/03/30/round-up-power-2/ + capacity := 1 << (64 - bits.LeadingZeros64(need-1)) + if capacity < 8 { + capacity = 8 + } + + elems := make([]T, have, capacity) + pivot := dq.read & dq.mask + copied := copy(elems, dq.elems[pivot:]) + copy(elems[copied:], dq.elems[:pivot]) + + dq.elems = elems[:capacity] + dq.mask = uint64(capacity) - 1 + dq.read, dq.write = 0, have +} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go new file mode 100644 index 0000000000..011581938d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -0,0 +1,102 @@ +package internal + +import ( + "debug/elf" + "fmt" + "io" +) + +type SafeELFFile struct { + *elf.File +} + +// NewSafeELFFile reads an ELF safely. +// +// Any panic during parsing is turned into an error. This is necessary since +// there are a bunch of unfixed bugs in debug/elf. +// +// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle +func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.NewFile(r) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// OpenSafeELFFile reads an ELF from a file. 
+// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// Symbols is the safe version of elf.File.Symbols. +func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF symbols panicked: %s", r) + }() + + syms, err = se.File.Symbols() + return +} + +// DynamicSymbols is the safe version of elf.File.DynamicSymbols. +func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) + }() + + syms, err = se.File.DynamicSymbols() + return +} + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 0000000000..a37777f21f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,9 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. 
+var NativeEndian = binary.BigEndian diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 0000000000..6dcd916d5d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,9 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.LittleEndian diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go new file mode 100644 index 0000000000..83a371ad35 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -0,0 +1,181 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier +// log buffer. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. +func ErrorWithLog(source string, err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + if i := bytes.IndexByte(log, 0); i != -1 { + log = log[:i] + } + + log = bytes.Trim(log, whitespace) + if len(log) == 0 { + return &VerifierError{source, err, nil, false} + } + + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. 
+ lines = append(lines, string(bytes.TrimRight(line, whitespace))) + } + + return &VerifierError{source, err, lines, false} +} + +// VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. +type VerifierError struct { + source string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string + // Deprecated: the log is never truncated anymore. + Truncated bool +} + +func (le *VerifierError) Unwrap() error { + return le.Cause +} + +func (le *VerifierError) Error() string { + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error()) + + n := len(log) + if n == 0 { + return b.String() + } + + lines := log[n-1:] + if n >= 2 && includePreviousLine(log[n-1]) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + for _, line := range lines { + b.WriteString(": ") + b.WriteString(strings.TrimSpace(line)) + } + + omitted := len(le.Log) - len(lines) + if omitted > 0 { + fmt.Fprintf(&b, " (%d line(s) omitted)", omitted) + } + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. 
+ + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true + } + + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false +} + +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// %+v: Output the first lines, or all lines if no width is given. +// %-v: Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. 
+ lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go new file mode 100644 index 0000000000..2b856c735e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/feature.go @@ -0,0 +1,184 @@ +package internal + +import ( + "errors" + "fmt" + "sync" +) + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// UnsupportedFeatureError is returned by FeatureTest() functions. +type UnsupportedFeatureError struct { + // The minimum Linux mainline version required for this feature. + // Used for the error string, and for sanity checking during testing. + MinimumVersion Version + + // The name of the feature that isn't supported. + Name string +} + +func (ufe *UnsupportedFeatureError) Error() string { + if ufe.MinimumVersion.Unspecified() { + return fmt.Sprintf("%s not supported", ufe.Name) + } + return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) +} + +// Is indicates that UnsupportedFeatureError is ErrNotSupported. +func (ufe *UnsupportedFeatureError) Is(target error) bool { + return target == ErrNotSupported +} + +// FeatureTest caches the result of a [FeatureTestFn]. +// +// Fields should not be modified after creation. +type FeatureTest struct { + // The name of the feature being detected. + Name string + // Version in the form Major.Minor[.Patch]. + Version string + // The feature test itself. 
+ Fn FeatureTestFn + + mu sync.RWMutex + done bool + result error +} + +// FeatureTestFn is used to determine whether the kernel supports +// a certain feature. +// +// The return values have the following semantics: +// +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed +type FeatureTestFn func() error + +// NewFeatureTest is a convenient way to create a single [FeatureTest]. +func NewFeatureTest(name, version string, fn FeatureTestFn) func() error { + ft := &FeatureTest{ + Name: name, + Version: version, + Fn: fn, + } + + return ft.execute +} + +// execute the feature test. +// +// The result is cached if the test is conclusive. +// +// See [FeatureTestFn] for the meaning of the returned error. +func (ft *FeatureTest) execute() error { + ft.mu.RLock() + result, done := ft.result, ft.done + ft.mu.RUnlock() + + if done { + return result + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // The test may have been executed by another caller while we were + // waiting to acquire ft.mu. + if ft.done { + return ft.result + } + + err := ft.Fn() + if err == nil { + ft.done = true + return nil + } + + if errors.Is(err, ErrNotSupported) { + var v Version + if ft.Version != "" { + v, err = NewVersion(ft.Version) + if err != nil { + return fmt.Errorf("feature %s: %w", ft.Name, err) + } + } + + ft.done = true + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: ft.Name, + } + + return ft.result + } + + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", ft.Name, err) +} + +// FeatureMatrix groups multiple related feature tests into a map. +// +// Useful when there is a small number of discrete features which are known +// at compile time. +// +// It must not be modified concurrently with calling [FeatureMatrix.Result]. 
+type FeatureMatrix[K comparable] map[K]*FeatureTest + +// Result returns the outcome of the feature test for the given key. +// +// It's safe to call this function concurrently. +func (fm FeatureMatrix[K]) Result(key K) error { + ft, ok := fm[key] + if !ok { + return fmt.Errorf("no feature probe for %v", key) + } + + return ft.execute() +} + +// FeatureCache caches a potentially unlimited number of feature probes. +// +// Useful when there is a high cardinality for a feature test. +type FeatureCache[K comparable] struct { + mu sync.RWMutex + newTest func(K) *FeatureTest + features map[K]*FeatureTest +} + +func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] { + return &FeatureCache[K]{ + newTest: newTest, + features: make(map[K]*FeatureTest), + } +} + +func (fc *FeatureCache[K]) Result(key K) error { + // NB: Executing the feature test happens without fc.mu taken. + return fc.retrieve(key).execute() +} + +func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest { + fc.mu.RLock() + ft := fc.features[key] + fc.mu.RUnlock() + + if ft != nil { + return ft + } + + fc.mu.Lock() + defer fc.mu.Unlock() + + if ft := fc.features[key]; ft != nil { + return ft + } + + ft = fc.newTest(key) + fc.features[key] = ft + return ft +} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go new file mode 100644 index 0000000000..1eaf4775ad --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -0,0 +1,128 @@ +package internal + +import ( + "bufio" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. 
+// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} + +// DiscardZeroes makes sure that all written bytes are zero +// before discarding them. +type DiscardZeroes struct{} + +func (DiscardZeroes) Write(p []byte) (int, error) { + for _, b := range p { + if b != 0 { + return 0, errors.New("encountered non-zero byte") + } + } + return len(p), nil +} + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} + +// ReadUint64FromFile reads a uint64 from a file. +// +// format specifies the contents of the file in fmt.Scanf syntax. +func ReadUint64FromFile(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) 
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return 0, fmt.Errorf("reading file %q: %w", filename, err)
+	}
+
+	var value uint64
+	n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+	if err != nil {
+		return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+	}
+	if n != 1 {
+		return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+	}
+
+	return value, nil
+}
+
+type uint64FromFileKey struct {
+	format, path string
+}
+
+var uint64FromFileCache = struct {
+	sync.RWMutex
+	values map[uint64FromFileKey]uint64
+}{
+	values: map[uint64FromFileKey]uint64{},
+}
+
+// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
+func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
+	filename := filepath.Join(path...)
+	key := uint64FromFileKey{format, filename}
+
+	uint64FromFileCache.RLock()
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		uint64FromFileCache.RUnlock()
+		return value, nil
+	}
+	uint64FromFileCache.RUnlock()
+
+	value, err := ReadUint64FromFile(format, filename)
+	if err != nil {
+		return 0, err
+	}
+
+	uint64FromFileCache.Lock()
+	defer uint64FromFileCache.Unlock()
+
+	if value, ok := uint64FromFileCache.values[key]; ok {
+		// Someone else got here before us, use what is cached.
+		return value, nil
+	}
+
+	uint64FromFileCache.values[key] = value
+	return value, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go
new file mode 100644
index 0000000000..776c7a10a2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go
+package kallsyms
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"sync"
+)
+
+var kernelModules struct {
+	sync.RWMutex
+	// function to kernel module mapping
+	kmods map[string]string
+}
+
+// KernelModule returns the kernel module, if any, a probe-able function is contained in. 
+func KernelModule(fn string) (string, error) {
+	kernelModules.RLock()
+	kmods := kernelModules.kmods
+	kernelModules.RUnlock()
+
+	if kmods == nil {
+		kernelModules.Lock()
+		defer kernelModules.Unlock()
+		kmods = kernelModules.kmods
+	}
+
+	if kmods != nil {
+		return kmods[fn], nil
+	}
+
+	f, err := os.Open("/proc/kallsyms")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	kmods, err = loadKernelModuleMapping(f)
+	if err != nil {
+		return "", err
+	}
+
+	kernelModules.kmods = kmods
+	return kmods[fn], nil
+}
+
+// FlushKernelModuleCache removes any cached information about function to kernel module mapping.
+func FlushKernelModuleCache() {
+	kernelModules.Lock()
+	defer kernelModules.Unlock()
+
+	kernelModules.kmods = nil
+}
+
+func loadKernelModuleMapping(f io.Reader) (map[string]string, error) {
+	mods := make(map[string]string)
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		fields := bytes.Fields(scanner.Bytes())
+		if len(fields) < 4 {
+			continue
+		}
+		switch string(fields[1]) {
+		case "t", "T":
+			mods[string(fields[2])] = string(bytes.Trim(fields[3], "[]"))
+		default:
+			continue
+		}
+	}
+	if scanner.Err() != nil {
+		return nil, scanner.Err()
+	}
+	return mods, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 0000000000..1921e4f15a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
+package kconfig
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal"
+)
+
+// Find finds a kconfig file on the host.
+// It first reads from /boot/config- of the current running kernel and tries
+// /proc/config.gz if nothing was found in /boot.
+// If none of the files provides a kconfig, it returns an error. 
+func Find() (*os.File, error) {
+	kernelRelease, err := internal.KernelRelease()
+	if err != nil {
+		return nil, fmt.Errorf("cannot get kernel release: %w", err)
+	}
+
+	path := "/boot/config-" + kernelRelease
+	f, err := os.Open(path)
+	if err == nil {
+		return f, nil
+	}
+
+	f, err = os.Open("/proc/config.gz")
+	if err == nil {
+		return f, nil
+	}
+
+	return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All the CONFIG_* which are in filter and which are set will be
+// put in the returned map as key with their corresponding value as map value.
+// If filter is nil, no filtering will occur.
+// If the kconfig file is not valid, an error will be returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+	var r io.Reader
+	zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+	if err != nil {
+		r = io.NewSectionReader(source, 0, math.MaxInt64)
+	} else {
+		// Source is gzip compressed, transparently decompress.
+		r = zr
+	}
+
+	ret := make(map[string]string, len(filter))
+
+	s := bufio.NewScanner(r)
+
+	for s.Scan() {
+		line := s.Bytes()
+		err = processKconfigLine(line, ret, filter)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse line: %w", err)
+		}
+
+		if filter != nil && len(ret) == len(filter) {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("cannot parse: %w", err)
+	}
+
+	if zr != nil {
+		return ret, zr.Close()
+	}
+
+	return ret, nil
+}
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+	// Ignore empty lines and "# CONFIG_* is not set". 
+ if !bytes.HasPrefix(line, []byte("CONFIG_")) { + return nil + } + + key, value, found := bytes.Cut(line, []byte{'='}) + if !found { + return fmt.Errorf("line %q does not contain separator '='", line) + } + + if len(value) == 0 { + return fmt.Errorf("line %q has no value", line) + } + + if filter != nil { + // NB: map[string(key)] gets special optimisation help from the compiler + // and doesn't allocate. Don't turn this into a variable. + _, ok := filter[string(key)] + if !ok { + return nil + } + } + + // This can seem odd, but libbpf only sets the value the first time the key is + // met: + // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908 + _, ok := m[string(key)] + if !ok { + m[string(key)] = string(value) + } + + return nil +} + +// PutValue translates the value given as parameter depending on the BTF +// type, the translated value is then written to the byte array. +func PutValue(data []byte, typ btf.Type, value string) error { + typ = btf.UnderlyingType(typ) + + switch value { + case "y", "n", "m": + return putValueTri(data, typ, value) + default: + if strings.HasPrefix(value, `"`) { + return putValueString(data, typ, value) + } + return putValueNumber(data, typ, value) + } +} + +// Golang translation of libbpf_tristate enum: +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169 +type triState int + +const ( + TriNo triState = 0 + TriYes triState = 1 + TriModule triState = 2 +) + +func putValueTri(data []byte, typ btf.Type, value string) error { + switch v := typ.(type) { + case *btf.Int: + if v.Encoding != btf.Bool { + return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding) + } + + if v.Size != 1 { + return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size) + } + + switch value { + case "y": + data[0] = 1 + case "n": + data[0] = 0 + default: + return fmt.Errorf("cannot use %q for btf.Bool", value) + } + case 
*btf.Enum: + if v.Name != "libbpf_tristate" { + return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name) + } + + var tri triState + switch value { + case "y": + tri = TriYes + case "m": + tri = TriModule + case "n": + tri = TriNo + default: + return fmt.Errorf("value %q is not support for libbpf_tristate", value) + } + + internal.NativeEndian.PutUint64(data, uint64(tri)) + default: + return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v) + } + + return nil +} + +func putValueString(data []byte, typ btf.Type, value string) error { + array, ok := typ.(*btf.Array) + if !ok { + return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array) + } + + contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int) + if !ok { + return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType) + } + + // Any Int, which is not bool, of one byte could be used to store char: + // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638 + if contentType.Size != 1 && contentType.Encoding != btf.Bool { + return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size) + } + + if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) { + return fmt.Errorf(`value %q must start and finish with '"'`, value) + } + + str := strings.Trim(value, `"`) + + // We need to trim string if the bpf array is smaller. + if uint32(len(str)) >= array.Nelems { + str = str[:array.Nelems] + } + + // Write the string content to .kconfig. 
+ copy(data, str) + + return nil +} + +func putValueNumber(data []byte, typ btf.Type, value string) error { + integer, ok := typ.(*btf.Int) + if !ok { + return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer) + } + + size := integer.Size + sizeInBits := size * 8 + + var n uint64 + var err error + if integer.Encoding == btf.Signed { + parsed, e := strconv.ParseInt(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } else { + parsed, e := strconv.ParseUint(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } + + if err != nil { + return fmt.Errorf("cannot parse value: %w", err) + } + + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. +// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. + if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + if len(data) < int(integer.Size) { + return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data)) + } + + switch integer.Size { + case 1: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + data[0] = byte(n) + case 2: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint16(data, uint16(n)) + case 4: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint32(data, uint32(n)) + case 8: + 
internal.NativeEndian.PutUint64(data, uint64(n)) + default: + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/math.go b/vendor/github.com/cilium/ebpf/internal/math.go new file mode 100644 index 0000000000..e95c8efde5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/math.go @@ -0,0 +1,13 @@ +package internal + +import "golang.org/x/exp/constraints" + +// Align returns 'n' updated to 'alignment' boundary. +func Align[I constraints.Integer](n, alignment I) I { + return (n + alignment - 1) / alignment * alignment +} + +// IsPow returns true if n is a power of two. +func IsPow[I constraints.Integer](n I) bool { + return n != 0 && (n&(n-1)) == 0 +} diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 0000000000..dd6e6cbafe --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,97 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "reflect" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. + case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. 
+ return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} + +// GoTypeName is like %T, but elides the package name. +// +// Pointers to a type are peeled off. +func GoTypeName(t any) string { + rT := reflect.TypeOf(t) + for rT.Kind() == reflect.Pointer { + rT = rT.Elem() + } + // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924 + return rT.Name() +} diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go new file mode 100644 index 0000000000..01d892f934 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -0,0 +1,65 @@ +package internal + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +func Pin(currentPath, newPath string, fd *sys.FD) error { + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + fsType, err := FSType(filepath.Dir(newPath)) + if err != nil { + return err + } + if fsType != unix.BPF_FS_MAGIC { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + + if currentPath == "" { + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: 
sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + // Renameat2 is used instead of os.Rename to disallow the new path replacing + // an existing path. + err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { + // Object is now moved to the new pinning path. + return nil + } + if !os.IsNotExist(err) { + return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) + } + // Internal state not in sync with the file system so let's fix it. + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + err := os.Remove(pinnedPath) + if err == nil || os.IsNotExist(err) { + return nil + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go new file mode 100644 index 0000000000..6e90f2ef71 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform.go @@ -0,0 +1,43 @@ +package internal + +import ( + "runtime" +) + +// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by +// the linux kernel. 
+// +// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go +// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047 +func PlatformPrefix() string { + switch runtime.GOARCH { + case "386": + return "__ia32_" + case "amd64", "amd64p32": + return "__x64_" + + case "arm", "armbe": + return "__arm_" + case "arm64", "arm64be": + return "__arm64_" + + case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le": + return "__mips_" + + case "s390": + return "__s390_" + case "s390x": + return "__s390x_" + + case "riscv", "riscv64": + return "__riscv_" + + case "ppc": + return "__powerpc_" + case "ppc64", "ppc64le": + return "__powerpc64_" + + default: + return "" + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go new file mode 100644 index 0000000000..d629145b62 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/prog.go @@ -0,0 +1,11 @@ +package internal + +// EmptyBPFContext is the smallest-possible BPF input context to be used for +// invoking `Program.{Run,Benchmark,Test}`. +// +// Programs require a context input buffer of at least 15 bytes. Looking in +// net/bpf/test_run.c, bpf_test_init() requires that the input is at least +// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets +// with invalid pkt_len"), it also requires the skb to be non-empty after +// removing the Layer 2 header. 
+var EmptyBPFContext = make([]byte, 15) diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go new file mode 100644 index 0000000000..44c02d676e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/statfs.go @@ -0,0 +1,23 @@ +package internal + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +func FSType(path string) (int64, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return 0, err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + return fsType, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 0000000000..dfe174448e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. + +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 0000000000..941a56fb91 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,133 @@ +package sys + +import ( + "fmt" + "math" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + if onLeakFD != nil { + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. 
+ old, exist := fds.LoadOrStore(value, callersFrames()) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f))) + } + } + + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).finalize) + return fd +} + +// finalize is set as the FD's runtime finalizer and +// sends a leak trace before calling FD.Close(). +func (fd *FD) finalize() { + if fd.raw < 0 { + return + } + + // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(fd.Int()) + if ok && onLeakFD != nil { + onLeakFD(f.(*runtime.Frames)) + } + + _ = fd.Close() +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). 
+ return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.disown()) +} + +func (fd *FD) disown() int { + value := int(fd.raw) + fds.Delete(int(value)) + fd.raw = -1 + + runtime.SetFinalizer(fd, nil) + return value +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns nil if the FD is not valid. +func (fd *FD) File(name string) *os.File { + if fd.raw < 0 { + return nil + } + + return os.NewFile(uintptr(fd.disown()), name) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go new file mode 100644 index 0000000000..cd50dd1f64 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go @@ -0,0 +1,93 @@ +package sys + +import ( + "bytes" + "fmt" + "runtime" + "sync" +) + +// OnLeakFD controls tracing [FD] lifetime to detect resources that are not +// closed by Close(). +// +// If fn is not nil, tracing is enabled for all FDs created going forward. fn is +// invoked for all FDs that are closed by the garbage collector instead of an +// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn +// (without disabling tracing in the meantime) will cause a panic. +// +// If fn is nil, tracing will be disabled. Any FDs that have not been closed are +// considered to be leaked, fn will be invoked for them, and the process will be +// terminated. 
+// +// fn will be invoked at most once for every unique sys.FD allocation since a +// runtime.Frames can only be unwound once. +func OnLeakFD(fn func(*runtime.Frames)) { + // Enable leak tracing if new fn is provided. + if fn != nil { + if onLeakFD != nil { + panic("OnLeakFD called twice with non-nil fn") + } + + onLeakFD = fn + return + } + + // fn is nil past this point. + + if onLeakFD == nil { + return + } + + // Call onLeakFD for all open fds. + if fs := flushFrames(); len(fs) != 0 { + for _, f := range fs { + onLeakFD(f) + } + } + + onLeakFD = nil +} + +var onLeakFD func(*runtime.Frames) + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds sync.Map // map[int]*runtime.Frames + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames() *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// FormatFrames formats a runtime.Frames as a human-readable string. 
+func FormatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go new file mode 100644 index 0000000000..d9fe217222 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -type MapFlags"; DO NOT EDIT. + +package sys + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[BPF_F_NO_PREALLOC-1] + _ = x[BPF_F_NO_COMMON_LRU-2] + _ = x[BPF_F_NUMA_NODE-4] + _ = x[BPF_F_RDONLY-8] + _ = x[BPF_F_WRONLY-16] + _ = x[BPF_F_STACK_BUILD_ID-32] + _ = x[BPF_F_ZERO_SEED-64] + _ = x[BPF_F_RDONLY_PROG-128] + _ = x[BPF_F_WRONLY_PROG-256] + _ = x[BPF_F_CLONE-512] + _ = x[BPF_F_MMAPABLE-1024] + _ = x[BPF_F_PRESERVE_ELEMS-2048] + _ = x[BPF_F_INNER_MAP-4096] + _ = x[BPF_F_LINK-8192] + _ = x[BPF_F_PATH_FD-16384] +} + +const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD" + +var _MapFlags_map = map[MapFlags]string{ + 1: _MapFlags_name[0:17], + 2: _MapFlags_name[17:36], + 4: _MapFlags_name[36:51], + 8: _MapFlags_name[51:63], + 16: _MapFlags_name[63:75], + 32: _MapFlags_name[75:95], + 64: _MapFlags_name[95:110], + 128: _MapFlags_name[110:127], + 256: _MapFlags_name[127:144], + 512: _MapFlags_name[144:155], + 1024: _MapFlags_name[155:169], + 2048: _MapFlags_name[169:189], + 4096: _MapFlags_name[189:204], + 8192: _MapFlags_name[204:214], + 16384: 
_MapFlags_name[214:227], +} + +func (i MapFlags) String() string { + if str, ok := _MapFlags_map[i]; ok { + return str + } + return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go new file mode 100644 index 0000000000..e9bb590597 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -0,0 +1,52 @@ +package sys + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// NewPointer creates a 64-bit pointer from an unsafe Pointer. +func NewPointer(ptr unsafe.Pointer) Pointer { + return Pointer{ptr: ptr} +} + +// NewSlicePointer creates a 64-bit pointer from a byte slice. +func NewSlicePointer(buf []byte) Pointer { + if len(buf) == 0 { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(&buf[0])} +} + +// NewSlicePointerLen creates a 64-bit pointer from a byte slice. +// +// Useful to assign both the pointer and the length in one go. +func NewSlicePointerLen(buf []byte) (Pointer, uint32) { + return NewSlicePointer(buf), uint32(len(buf)) +} + +// NewStringPointer creates a 64-bit pointer from a string. +func NewStringPointer(str string) Pointer { + p, err := unix.BytePtrFromString(str) + if err != nil { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(p)} +} + +// NewStringSlicePointer allocates an array of Pointers to each string in the +// given slice of strings and returns a 64-bit pointer to the start of the +// resulting array. +// +// Use this function to pass arrays of strings as syscall arguments. 
+func NewStringSlicePointer(strings []string) Pointer { + sp := make([]Pointer, 0, len(strings)) + for _, s := range strings { + sp = append(sp, NewStringPointer(s)) + } + + return Pointer{ptr: unsafe.Pointer(&sp[0])} +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go new file mode 100644 index 0000000000..6278c79c9e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -0,0 +1,14 @@ +//go:build armbe || mips || mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + pad uint32 + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go new file mode 100644 index 0000000000..c27b537e8e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -0,0 +1,14 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer + pad uint32 +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go new file mode 100644 index 0000000000..2d7828230a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -0,0 +1,13 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. 
+type Pointer struct { + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go new file mode 100644 index 0000000000..e5337191d6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -0,0 +1,83 @@ +package sys + +import ( + "fmt" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// A sigset containing only SIGPROF. +var profSet unix.Sigset_t + +func init() { + // See sigsetAdd for details on the implementation. Open coded here so + // that the compiler will check the constant calculations for us. + profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits) +} + +// maskProfilerSignal locks the calling goroutine to its underlying OS thread +// and adds SIGPROF to the thread's signal mask. This prevents pprof from +// interrupting expensive syscalls like e.g. BPF_PROG_LOAD. +// +// The caller must defer unmaskProfilerSignal() to reverse the operation. +func maskProfilerSignal() { + runtime.LockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil { + runtime.UnlockOSThread() + panic(fmt.Errorf("masking profiler signal: %w", err)) + } +} + +// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal +// mask, allowing it to be interrupted for profiling once again. +// +// It also unlocks the current goroutine from its underlying OS thread. +func unmaskProfilerSignal() { + defer runtime.UnlockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil { + panic(fmt.Errorf("unmasking profiler signal: %w", err)) + } +} + +const ( + // Signal is the nth bit in the bitfield. + sigprofBit = int(unix.SIGPROF - 1) + // The number of bits in one Sigset_t word. + wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8 +) + +// sigsetAdd adds signal to set. +// +// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch. 
+// This function must be able to deal with both and so must avoid any direct +// references to u32 or u64 types. +func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error { + if signal < 1 { + return fmt.Errorf("signal %d must be larger than 0", signal) + } + + // For amd64, runtime.sigaddset() performs the following operation: + // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31) + // + // This trick depends on sigset being two u32's, causing a signal in the + // bottom 31 bits to be written to the low word if bit 32 is low, or the high + // word if bit 32 is high. + + // Signal is the nth bit in the bitfield. + bit := int(signal - 1) + // Word within the sigset the bit needs to be written to. + word := bit / wordBits + + if word >= len(set.Val) { + return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal) + } + + // Write the signal bit into its corresponding word at the corrected offset. + set.Val[word] |= 1 << (bit % wordBits) + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 0000000000..f6b6e93458 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,229 @@ +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// ENOTSUPP is a Linux internal error code that has leaked into UAPI. +// +// It is not the same as ENOTSUP or EOPNOTSUPP. +const ENOTSUPP = syscall.Errno(524) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. 
+ if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. +// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeMultiLinkInfo) info() 
(unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. +// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: NewPointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [unix.BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +type LogLevel uint32 + +const ( + BPF_LOG_LEVEL1 LogLevel = 1 << iota + BPF_LOG_LEVEL2 + BPF_LOG_STATS +) + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// TypeID identifies a type in a BTF blob. +type TypeID uint32 + +// MapFlags control map behaviour. +type MapFlags uint32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type MapFlags + +const ( + BPF_F_NO_PREALLOC MapFlags = 1 << iota + BPF_F_NO_COMMON_LRU + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_STACK_BUILD_ID + BPF_F_ZERO_SEED + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_CLONE + BPF_F_MMAPABLE + BPF_F_PRESERVE_ELEMS + BPF_F_INNER_MAP + BPF_F_LINK + BPF_F_PATH_FD +) + +// Flags used by bpf_mprog. 
+const ( + BPF_F_REPLACE = 1 << (iota + 2) + BPF_F_BEFORE + BPF_F_AFTER + BPF_F_ID + BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK +) + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. +type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +func (we wrappedErrno) Error() string { + if we.Errno == ENOTSUPP { + return "operation not supported" + } + return we.Errno.Error() +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 0000000000..70e754de71 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1383 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. 
+ +package sys + +import ( + "unsafe" +) + +type AdjRoomMode uint32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType uint32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + BPF_LSM_CGROUP AttachType = 43 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS 
AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + BPF_CGROUP_UNIX_CONNECT AttachType = 49 + BPF_CGROUP_UNIX_SENDMSG AttachType = 50 + BPF_CGROUP_UNIX_RECVMSG AttachType = 51 + BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52 + BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 + BPF_NETKIT_PRIMARY AttachType = 54 + BPF_NETKIT_PEER AttachType = 55 + __MAX_BPF_ATTACH_TYPE AttachType = 56 +) + +type Cmd uint32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId uint32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + 
BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + 
BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 
102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + 
BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + BPF_FUNC_kptr_xchg FunctionId = 194 + 
BPF_FUNC_map_lookup_percpu_elem FunctionId = 195 + BPF_FUNC_skc_to_mptcp_sock FunctionId = 196 + BPF_FUNC_dynptr_from_mem FunctionId = 197 + BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198 + BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199 + BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200 + BPF_FUNC_dynptr_read FunctionId = 201 + BPF_FUNC_dynptr_write FunctionId = 202 + BPF_FUNC_dynptr_data FunctionId = 203 + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204 + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205 + BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206 + BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 + BPF_FUNC_ktime_get_tai_ns FunctionId = 208 + BPF_FUNC_user_ringbuf_drain FunctionId = 209 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 +) + +type HdrStartOff uint32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType uint32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + BPF_LINK_TYPE_NETKIT LinkType = 13 + __MAX_BPF_LINK_TYPE LinkType = 14 +) + +type MapType uint32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + 
BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 +) + +type PerfEventType uint32 + +const ( + BPF_PERF_EVENT_UNSPEC PerfEventType = 0 + BPF_PERF_EVENT_UPROBE PerfEventType = 1 + BPF_PERF_EVENT_URETPROBE PerfEventType = 2 + BPF_PERF_EVENT_KPROBE PerfEventType = 3 + BPF_PERF_EVENT_KRETPROBE PerfEventType = 4 + BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5 + BPF_PERF_EVENT_EVENT PerfEventType = 6 +) + +type ProgType uint32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + 
BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 +) + +type RetCode uint32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 + BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129 +) + +type SkAction uint32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus uint32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType uint32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type TcxActionBase int32 + +const ( + TCX_NEXT TcxActionBase = -1 + TCX_PASS TcxActionBase = 0 + TCX_DROP TcxActionBase = 2 + TCX_REDIRECT TcxActionBase = 7 +) + +type XdpAction uint32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id BTFID + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra 
[48]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags MapFlags + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId TypeID + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + _ [4]byte + MapExtra uint64 +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfId BTFID + FuncInfoRecSize uint32 + FuncInfo Pointer + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo Pointer + JitedLineInfo uint64 + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + AttachBtfObjId BTFID + AttachBtfId TypeID + _ [4]byte +} + +type SkLookup struct { + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), 
unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 +} + +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId TypeID + _ [44]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [36]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateKprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + KprobeMultiFlags uint32 + Count uint32 + Syms Pointer + Addrs Pointer + Cookies Pointer + _ [16]byte +} + +func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err 
!= nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetfilterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Pf uint32 + Hooknum uint32 + Priority int32 + NetfilterFlags uint32 + _ [32]byte +} + +func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetkitAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [40]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTcxAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTracingAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId BTFID + _ [4]byte + Cookie uint64 + _ [32]byte +} + +func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + 
return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateUprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Path Pointer + Offsets Pointer + RefCtrOffsets Pointer + Cookies Pointer + Count uint32 + UprobeMultiFlags uint32 + Pid uint32 + _ [4]byte +} + +func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetFdByIdAttr struct{ Id LinkID } + +func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetNextIdAttr struct { + Id LinkID + NextId LinkID +} + +func LinkGetNextId(attr *LinkGetNextIdAttr) error { + _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags MapFlags + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxValueTypeId TypeID + MapExtra uint64 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, 
err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + 
Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + 
AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + _ [4]byte + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel LogLevel + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId TypeID + AttachBtfObjFd uint32 + CoreReloCnt uint32 + FdArray Pointer + CoreRelos Pointer + CoreReloRecSize uint32 + LogTrueSize uint32 +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, 
err + } + return NewFD(int(fd)) +} + +type ProgQueryAttr struct { + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds Pointer + Count uint32 + _ [4]byte + ProgAttachFlags Pointer + LinkIds Pointer + LinkAttachFlags Pointer + Revision uint64 +} + +func ProgQuery(attr *ProgQueryAttr) error { + _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + CgroupId uint64 + AttachType AttachType + _ [36]byte +} + +type IterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TargetName Pointer + TargetNameLen uint32 +} + +type KprobeLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FuncName Pointer + NameLen uint32 + Offset uint32 + Addr uint64 + Missed uint64 + _ [8]byte +} + +type KprobeMultiLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs Pointer + Count uint32 + Flags uint32 + Missed uint64 + _ [24]byte +} + +type NetNsLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + NetnsIno uint32 + AttachType AttachType + _ 
[40]byte +} + +type NetfilterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 + _ [32]byte +} + +type NetkitLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type PerfEventLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType +} + +type RawTracepointLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TpName Pointer + TpNameLen uint32 + _ [36]byte +} + +type TcxLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type TracingLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + AttachType AttachType + TargetObjId uint32 + TargetBtfId TypeID + _ [36]byte +} + +type XDPLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + _ [44]byte +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go new file mode 100644 index 0000000000..d184ea196a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -0,0 +1,83 @@ +package sysenc + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/sys" +) + +type Buffer struct { + ptr unsafe.Pointer + // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using + // zero-copy unmarshaling. + size int +} + +const syscallPointerOnly = -1 + +func newBuffer(buf []byte) Buffer { + if len(buf) == 0 { + return Buffer{} + } + return Buffer{unsafe.Pointer(&buf[0]), len(buf)} +} + +// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling. +// +// [Pointer] is the only valid method to call on such a Buffer. +// Use [SyscallBuffer] instead if possible. 
+func UnsafeBuffer(ptr unsafe.Pointer) Buffer { + return Buffer{ptr, syscallPointerOnly} +} + +// SyscallOutput prepares a Buffer for a syscall to write into. +// +// size is the length of the desired buffer in bytes. +// The buffer may point at the underlying memory of dst, in which case [Unmarshal] +// becomes a no-op. +// +// The contents of the buffer are undefined and may be non-zero. +func SyscallOutput(dst any, size int) Buffer { + if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size { + buf := newBuffer(dstBuf) + buf.size = syscallPointerOnly + return buf + } + + return newBuffer(make([]byte, size)) +} + +// CopyTo copies the buffer into dst. +// +// Returns the number of copied bytes. +func (b Buffer) CopyTo(dst []byte) int { + return copy(dst, b.unsafeBytes()) +} + +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.unsafeBytes()...) +} + +// Pointer returns the location where a syscall should write. +func (b Buffer) Pointer() sys.Pointer { + // NB: This deliberately ignores b.length to support zero-copy + // marshaling / unmarshaling using unsafe.Pointer. + return sys.NewPointer(b.ptr) +} + +// Unmarshal the buffer into the provided value. +func (b Buffer) Unmarshal(data any) error { + if b.size == syscallPointerOnly { + return nil + } + + return Unmarshal(data, b.unsafeBytes()) +} + +func (b Buffer) unsafeBytes() []byte { + if b.size == syscallPointerOnly { + return nil + } + return unsafe.Slice((*byte)(b.ptr), b.size) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go new file mode 100644 index 0000000000..676ad98ba1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go @@ -0,0 +1,3 @@ +// Package sysenc provides efficient conversion of Go values to system +// call interfaces. 
+package sysenc diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go new file mode 100644 index 0000000000..52d111e7af --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://go.dev/LICENSE. + +package sysenc + +import ( + "reflect" + "sync" +) + +var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool + +func hasUnexportedFields(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Slice, reflect.Array, reflect.Pointer: + return hasUnexportedFields(typ.Elem()) + + case reflect.Struct: + if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok { + return unexported.(bool) + } + + unexported := false + for i, n := 0, typ.NumField(); i < n; i++ { + field := typ.Field(i) + // Package binary allows _ fields but always writes zeroes into them. + if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) { + unexported = true + break + } + } + + hasUnexportedFieldsCache.Store(typ, unexported) + return unexported + + default: + // NB: It's not clear what this means for Chan and so on. + return false + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go new file mode 100644 index 0000000000..0026af8f24 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -0,0 +1,177 @@ +package sysenc + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "reflect" + "slices" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +// Marshal turns data into a byte slice using the system's native endianness. +// +// If possible, avoids allocations by directly using the backing memory +// of data. 
This means that the variable must not be modified for the lifetime +// of the returned [Buffer]. +// +// Returns an error if the data can't be turned into a byte slice according to +// the behaviour of [binary.Write]. +func Marshal(data any, size int) (Buffer, error) { + if data == nil { + return Buffer{}, errors.New("can't marshal a nil value") + } + + var buf []byte + var err error + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = unsafe.Slice(unsafe.StringData(value), len(value)) + case []byte: + buf = value + case int16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value)) + case uint16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value) + case int32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value)) + case uint32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value) + case int64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value)) + case uint64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value) + default: + if buf := unsafeBackingMemory(data); len(buf) == size { + return newBuffer(buf), nil + } + + wr := internal.NewBuffer(make([]byte, 0, size)) + defer internal.PutBuffer(wr) + + err = binary.Write(wr, internal.NativeEndian, value) + buf = wr.Bytes() + } + if err != nil { + return Buffer{}, err + } + + if len(buf) != size { + return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size) + } + + return newBuffer(buf), nil +} + +var bytesReaderPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Reader) + }, +} + +// Unmarshal a byte slice in the system's native endianness into data. +// +// Returns an error if buf can't be unmarshalled according to the behaviour +// of [binary.Read]. 
+func Unmarshal(data interface{}, buf []byte) error { + switch value := data.(type) { + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + + case *string: + *value = string(buf) + return nil + + case *[]byte: + // Backwards compat: unmarshaling into a slice replaces the whole slice. + *value = slices.Clone(buf) + return nil + + default: + if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) { + copy(dataBuf, buf) + return nil + } + + rd := bytesReaderPool.Get().(*bytes.Reader) + defer bytesReaderPool.Put(rd) + + rd.Reset(buf) + + if err := binary.Read(rd, internal.NativeEndian, value); err != nil { + return err + } + + if rd.Len() != 0 { + return fmt.Errorf("unmarshaling %T doesn't consume all data", data) + } + + return nil + } +} + +// unsafeBackingMemory returns the backing memory of data if it can be used +// instead of calling into package binary. +// +// Returns nil if the value is not a pointer or a slice, or if it contains +// padding or unexported fields. +func unsafeBackingMemory(data any) []byte { + if data == nil { + return nil + } + + value := reflect.ValueOf(data) + var valueSize int + switch value.Kind() { + case reflect.Pointer: + if value.IsNil() { + return nil + } + + if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice { + valueSize = int(elemType.Size()) + break + } + + // We're dealing with a pointer to a slice. Dereference and + // handle it like a regular slice. + value = value.Elem() + fallthrough + + case reflect.Slice: + valueSize = int(value.Type().Elem().Size()) * value.Len() + + default: + // Prevent Value.UnsafePointer from panicking. + return nil + } + + // Some nil pointer types currently crash binary.Size. Call it after our own + // code so that the panic isn't reachable. + // See https://github.com/golang/go/issues/60892 + if size := binary.Size(data); size == -1 || size != valueSize { + // The type contains padding or unsupported types. 
+ return nil + } + + if hasUnexportedFields(reflect.TypeOf(data)) { + return nil + } + + // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer + // rules because it's very unlikely that the source data has "an equivalent + // memory layout". However, we can make it safe-ish because of the + // following reasons: + // - There is no alignment mismatch since we cast to a type with an + // alignment of 1. + // - There are no pointers in the source type so we don't upset the GC. + // - The length is verified at runtime. + return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize) +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go new file mode 100644 index 0000000000..897740fec0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -0,0 +1,360 @@ +package tracefs + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrInvalidInput = errors.New("invalid input") + + ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes") +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=ProbeType -linecomment + +type ProbeType uint8 + +const ( + Kprobe ProbeType = iota // kprobe + Uprobe // uprobe +) + +func (pt ProbeType) eventsFile() (*os.File, error) { + path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String())) + if err != nil { + return nil, err + } + + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666) +} + +type ProbeArgs struct { + Type ProbeType + Symbol, Group, Path string + Offset, RefCtrOffset, Cookie uint64 + Pid, RetprobeMaxActive int + Ret bool +} + +// RandomGroup generates a pseudorandom string for use as a tracefs group name. 
+// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails or when prefix contains characters not +// allowed by IsValidTraceID. +func RandomGroup(prefix string) (string, error) { + if !validIdentifier(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput) + } + + return group, nil +} + +// validIdentifier implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set +// of characters. Non-empty, first character must not be a number, all +// characters must be alphanumeric or underscore. +func validIdentifier(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + default: + return false + } + } + + return true +} + +func sanitizeTracefsPath(path ...string) (string, error) { + base, err := getTracefsPath() + if err != nil { + return "", err + } + l := filepath.Join(path...) + p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput) + } + return p, nil +} + +// getTracefsPath will return a correct path to the tracefs mount point. +// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, +// but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. +// The available tracefs paths will depends on distribution choices. 
+var getTracefsPath = sync.OnceValues(func() (string, error) { + for _, p := range []struct { + path string + fsType int64 + }{ + {"/sys/kernel/tracing", unix.TRACEFS_MAGIC}, + {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC}, + // RHEL/CentOS + {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, + } { + if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType { + return p.path, nil + } + } + + return "", errors.New("neither debugfs nor tracefs are mounted") +}) + +// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore. +// +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeIdentifier(s string) string { + var skip bool + return strings.Map(func(c rune) rune { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + return c + + case skip: + return -1 + + default: + skip = true + return '_' + } + }, s) +} + +// EventID reads a trace event's ID from tracefs given its group and name. +// The kernel requires group and name to be alphanumeric or underscore. +func EventID(group, name string) (uint64, error) { + if !validIdentifier(group) { + return 0, fmt.Errorf("invalid tracefs group: %q", group) + } + + if !validIdentifier(name) { + return 0, fmt.Errorf("invalid tracefs name: %q", name) + } + + path, err := sanitizeTracefsPath("events", group, name, "id") + if err != nil { + return 0, err + } + tid, err := internal.ReadUint64FromFile("%d\n", path) + if errors.Is(err, os.ErrNotExist) { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +func probePrefix(ret bool, maxActive int) string { + if ret { + if maxActive > 0 { + return fmt.Sprintf("r%d", maxActive) + } + return "r" + } + return "p" +} + +// Event represents an entry in a tracefs probe events file. 
+type Event struct { + typ ProbeType + group, name string + // event id allocated by the kernel. 0 if the event has already been removed. + id uint64 +} + +// NewEvent creates a new ephemeral trace event. +// +// Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. Returns an error if +// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if +// the kernel is too old to support kretprobe maxactive. +func NewEvent(args ProbeArgs) (*Event, error) { + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness. + eventName := sanitizeIdentifier(args.Symbol) + _, err := EventID(args.Group, eventName) + if err == nil { + return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) + } + + // Open the kprobe_events file in tracefs. + f, err := args.Type.eventsFile() + if err != nil { + return nil, err + } + defer f.Close() + + var pe, token string + switch args.Type { + case Kprobe: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. 
This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. + if args.RetprobeMaxActive != 0 && !args.Ret { + return nil, ErrInvalidMaxActive + } + token = KprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token) + case Uprobe: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) + // + // See Documentation/trace/uprobetracer.txt for more details. + if args.RetprobeMaxActive != 0 { + return nil, ErrInvalidMaxActive + } + token = UprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token) + } + _, err = f.WriteString(pe) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) + } + + if err != nil { + return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err) + } + + // Get the newly-created trace event's id. 
+ tid, err := EventID(args.Group, eventName) + if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) { + // Kernels < 4.12 don't support maxactive and therefore auto generate + // group and event names from the symbol and offset. The symbol is used + // without any sanitization. + // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712 + event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset) + if err := removeEvent(args.Type, event); err != nil { + return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) + } + return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("get trace event id: %w", err) + } + + evt := &Event{args.Type, args.Group, eventName, tid} + runtime.SetFinalizer(evt, (*Event).Close) + return evt, nil +} + +// Close removes the event from tracefs. +// +// Returns os.ErrClosed if the event has already been closed before. +func (evt *Event) Close() error { + if evt.id == 0 { + return os.ErrClosed + } + + evt.id = 0 + runtime.SetFinalizer(evt, nil) + pe := fmt.Sprintf("%s/%s", evt.group, evt.name) + return removeEvent(evt.typ, pe) +} + +func removeEvent(typ ProbeType, pe string) error { + f, err := typ.eventsFile() + if err != nil { + return err + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + if _, err = f.WriteString("-:" + pe); err != nil { + return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err) + } + + return nil +} + +// ID returns the tracefs ID associated with the event. +func (evt *Event) ID() uint64 { + return evt.id +} + +// Group returns the tracefs group used by the event. +func (evt *Event) Group() string { + return evt.group +} + +// KprobeToken creates the SYM[+offs] token for the tracefs api. 
+func KprobeToken(args ProbeArgs) string { + po := args.Symbol + + if args.Offset != 0 { + po += fmt.Sprintf("+%#x", args.Offset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go new file mode 100644 index 0000000000..87cb0a059b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT. + +package tracefs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Kprobe-0] + _ = x[Uprobe-1] +} + +const _ProbeType_name = "kprobeuprobe" + +var _ProbeType_index = [...]uint8{0, 6, 12} + +func (i ProbeType) String() string { + if i >= ProbeType(len(_ProbeType_index)-1) { + return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go new file mode 100644 index 0000000000..994f31260d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go @@ -0,0 +1,16 @@ +package tracefs + +import "fmt" + +// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func UprobeToken(args ProbeArgs) string { + po := fmt.Sprintf("%s:%#x", args.Path, args.Offset) + + if args.RefCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. 
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.RefCtrOffset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go new file mode 100644 index 0000000000..d168d36f18 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go @@ -0,0 +1,11 @@ +// Package unix re-exports Linux specific parts of golang.org/x/sys/unix. +// +// It avoids breaking compilation on other OS by providing stubs as follows: +// - Invoking a function always returns an error. +// - Errnos have distinct, non-zero values. +// - Constants have distinct but meaningless values. +// - Types use the same names for members, but may or may not follow the +// Linux layout. +package unix + +// Note: please don't add any custom API to this package. Use internal/sys instead. diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go new file mode 100644 index 0000000000..d725cfaa39 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -0,0 +1,216 @@ +//go:build linux + +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +const ( + ENOENT = linux.ENOENT + EEXIST = linux.EEXIST + EAGAIN = linux.EAGAIN + ENOSPC = linux.ENOSPC + EINVAL = linux.EINVAL + EPOLLIN = linux.EPOLLIN + EINTR = linux.EINTR + EPERM = linux.EPERM + ESRCH = linux.ESRCH + ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG + EFAULT = linux.EFAULT + EACCES = linux.EACCES + EILSEQ = linux.EILSEQ + EOPNOTSUPP = linux.EOPNOTSUPP + ESTALE = linux.ESTALE +) + +const ( + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE 
+ BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + EM_NONE = linux.EM_NONE + 
EM_BPF = linux.EM_BPF + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP +) + +type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t +type Rlimit = linux.Rlimit +type Signal = linux.Signal +type Sigset_t = linux.Sigset_t +type PerfEventMmapPage = linux.PerfEventMmapPage +type EpollEvent = linux.EpollEvent +type PerfEventAttr = linux.PerfEventAttr +type Utsname = linux.Utsname +type CPUSet = linux.CPUSet + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return linux.Syscall(trap, a1, a2, a3) +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return linux.PthreadSigmask(how, set, oldset) +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return linux.FcntlInt(fd, cmd, arg) +} + +func IoctlSetInt(fd int, req uint, value int) error { + return linux.IoctlSetInt(fd, req, value) +} + +func Statfs(path string, buf *Statfs_t) (err error) { + return linux.Statfs(path, buf) +} + +func Close(fd int) (err error) { + return linux.Close(fd) +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return linux.EpollWait(epfd, events, msec) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return linux.EpollCtl(epfd, op, fd, event) +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +func EpollCreate1(flag int) (fd int, err error) { + return linux.EpollCreate1(flag) +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + 
return linux.Munmap(b) +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} + +func Uname(buf *Utsname) (err error) { + return linux.Uname(buf) +} + +func Getpid() int { + return linux.Getpid() +} + +func Gettid() int { + return linux.Gettid() +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return linux.Tgkill(tgid, tid, sig) +} + +func BytePtrFromString(s string) (*byte, error) { + return linux.BytePtrFromString(s) +} + +func ByteSliceToString(s []byte) string { + return linux.ByteSliceToString(s) +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} + +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) +} + +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) +} + +func SetsockoptInt(fd, level, opt, value int) error { + return linux.SetsockoptInt(fd, level, opt, value) +} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return linux.SchedSetaffinity(pid, set) +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return linux.SchedGetaffinity(pid, set) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 0000000000..3ff8962716 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,311 @@ +//go:build !linux + +package unix + +import ( + "fmt" + "runtime" + "syscall" +) + +var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + +// Errnos are distinct and non-zero. 
+const ( + ENOENT syscall.Errno = iota + 1 + EEXIST + EAGAIN + ENOSPC + EINVAL + EINTR + EPERM + ESRCH + ENODEV + EBADF + E2BIG + EFAULT + EACCES + EILSEQ + EOPNOTSUPP + ESTALE +) + +// Constants are distinct to avoid breaking switch statements. +const ( + BPF_F_NO_PREALLOC = iota + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE + BPF_F_MMAPABLE + BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN + BPF_F_XDP_HAS_FRAGS + BPF_OBJ_NAME_LEN + BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ + SYS_BPF + F_DUPFD_CLOEXEC + EPOLLIN + EPOLL_CTL_ADD + EPOLL_CLOEXEC + O_CLOEXEC + O_NONBLOCK + PROT_NONE + PROT_READ + PROT_WRITE + MAP_ANON + MAP_SHARED + MAP_PRIVATE + PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF + PerfBitWatermark + PerfBitWriteBackward + PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY + RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME + PERF_RECORD_LOST + PERF_RECORD_SAMPLE + AT_FDCWD + RENAME_NOREPLACE + SO_ATTACH_BPF + SO_DETACH_BPF + SOL_SOCKET + SIGPROF + SIG_BLOCK + SIG_UNBLOCK + EM_NONE + EM_BPF + BPF_FS_MAGIC + TRACEFS_MAGIC + DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP + BPF_F_LOCK +) + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Signal int + +type Sigset_t struct { + Val [4]uint64 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return 0, 0, syscall.ENOTSUP +} + +func PthreadSigmask(how 
int, set, oldset *Sigset_t) error { + return errNonLinux +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux +} + +func IoctlSetInt(fd int, req uint, value int) error { + return errNonLinux +} + +func Statfs(path string, buf *Statfs_t) error { + return errNonLinux +} + +func Close(fd int) (err error) { + return errNonLinux +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return 0, errNonLinux +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux +} + +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux +} + +func EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux +} + +func Munmap(b []byte) (err error) { + return errNonLinux +} + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 
+} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux +} + +type Utsname struct { + Release [65]byte + Version [65]byte +} + +func Uname(buf *Utsname) (err error) { + return errNonLinux +} + +func Getpid() int { + return -1 +} + +func Gettid() int { + return -1 +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return errNonLinux +} + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return errNonLinux +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} + +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { + return errNonLinux +} + +func SetsockoptInt(fd, level, opt, value int) error { + return errNonLinux +} + +type CPUSet struct{} + +func (*CPUSet) Set(int) {} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go new file mode 100644 index 0000000000..1049278554 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -0,0 +1,143 @@ +package internal + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. 
+func vdsoVersion() (uint32, error) { + av, err := newAuxvRuntimeReader() + if err != nil { + return 0, err + } + + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + for { + tag, value, err := r.ReadAuxvPair() + if err != nil { + return 0, err + } + + switch tag { + case _AT_SYSINFO_EHDR: + if value != 0 { + return uintptr(value), nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. 
+func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, Align(n.NameSize, 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. 
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go new file mode 100644 index 0000000000..acd4650af7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -0,0 +1,107 @@ +package internal + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal/unix" +) + +const ( + // Version constant used in ELF binaries indicating that the loader needs to + // substitute the eBPF program's version with the value of the kernel's + // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf + // and RedSift. + MagicKernelVersion = 0xFFFFFFFE +) + +// A Version in the form Major.Minor.Patch. +type Version [3]uint16 + +// NewVersion creates a version from a string like "Major.Minor.Patch". +// +// Patch is optional. +func NewVersion(ver string) (Version, error) { + var major, minor, patch uint16 + n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) + if n < 2 { + return Version{}, fmt.Errorf("invalid version: %s", ver) + } + return Version{major, minor, patch}, nil +} + +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + +func (v Version) String() string { + if v[2] == 0 { + return fmt.Sprintf("v%d.%d", v[0], v[1]) + } + return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) +} + +// Less returns true if the version is less than another version. +func (v Version) Less(other Version) bool { + for i, a := range v { + if a == other[i] { + continue + } + return a < other[i] + } + return false +} + +// Unspecified returns true if the version is all zero. 
+func (v Version) Unspecified() bool { + return v[0] == 0 && v[1] == 0 && v[2] == 0 +} + +// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. +// It represents the kernel version and patch level as a single value. +func (v Version) Kernel() uint32 { + + // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid + // overflowing into PATCHLEVEL. + // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). + s := v[2] + if s > 255 { + s = 255 + } + + // Truncate members to uint8 to prevent them from spilling over into + // each other when overflowing 8 bits. + return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) +} + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(func() (Version, error) { + return detectKernelVersion() +}) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (Version, error) { + vc, err := vdsoVersion() + if err != nil { + return Version{}, err + } + return NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. 
+func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/vendor/github.com/cilium/ebpf/link/anchor.go b/vendor/github.com/cilium/ebpf/link/anchor.go new file mode 100644 index 0000000000..1a3b5f7681 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/anchor.go @@ -0,0 +1,137 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const anchorFlags = sys.BPF_F_REPLACE | + sys.BPF_F_BEFORE | + sys.BPF_F_AFTER | + sys.BPF_F_ID | + sys.BPF_F_LINK_MPROG + +// Anchor is a reference to a link or program. +// +// It is used to describe where an attachment or detachment should take place +// for link types which support multiple attachment. +type Anchor interface { + // anchor returns an fd or ID and a set of flags. + // + // By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG + // changes this to refer to a link instead. + // + // BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program + // is attached. The default behaviour if none of these flags is specified + // matches BPF_F_AFTER. + anchor() (fdOrID, flags uint32, _ error) +} + +type firstAnchor struct{} + +func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_BEFORE, nil +} + +// Head is the position before all other programs or links. +func Head() Anchor { + return firstAnchor{} +} + +type lastAnchor struct{} + +func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_AFTER, nil +} + +// Tail is the position after all other programs or links. +func Tail() Anchor { + return lastAnchor{} +} + +// Before is the position just in front of target. +func BeforeLink(target Link) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. 
+func AfterLink(target Link) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. +func ReplaceProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +// Before is the position just in front of target. +func BeforeProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. 
+func ReplaceProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +type anchor struct { + target any + position uint32 +} + +func (ap anchor) anchor() (fdOrID, flags uint32, _ error) { + var typeFlag uint32 + switch target := ap.target.(type) { + case *ebpf.Program: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = 0 + case ebpf.ProgramID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_ID + case interface{ FD() int }: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = sys.BPF_F_LINK_MPROG + case ID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID + default: + return 0, 0, fmt.Errorf("invalid target %T", ap.target) + } + + return fdOrID, ap.position | typeFlag, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go new file mode 100644 index 0000000000..f17d34f03c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -0,0 +1,208 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type cgroupAttachFlags uint32 + +const ( + // Allow programs attached to sub-cgroups to override the verdict of this + // program. + flagAllowOverride cgroupAttachFlags = 1 << iota + // Allow attaching multiple programs to the cgroup. Only works if the cgroup + // has zero or more programs attached using the Multi flag. Implies override. + flagAllowMulti + // Set automatically by progAttachCgroup.Update(). Used for updating a + // specific given program attached in multi-mode. + flagReplace +) + +type CgroupOptions struct { + // Path to a cgroupv2 folder. + Path string + // One of the AttachCgroup* constants + Attach ebpf.AttachType + // Program must be of type CGroup*, and the attach type must match Attach. 
+ Program *ebpf.Program +} + +// AttachCgroup links a BPF program to a cgroup. +// +// If the running kernel doesn't support bpf_link, attempts to emulate its +// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not +// available, the returned [Link] will not support pinning to bpffs. +// +// If you need more control over attachment flags or the attachment mechanism +// used, look at [RawAttachProgram] and [AttachRawLink] instead. +func AttachCgroup(opts CgroupOptions) (cg Link, err error) { + cgroup, err := os.Open(opts.Path) + if err != nil { + return nil, fmt.Errorf("can't open cgroup: %s", err) + } + defer func() { + if _, ok := cg.(*progAttachCgroup); ok { + // Skip closing the cgroup handle if we return a valid progAttachCgroup, + // where the handle is retained to implement Update(). + return + } + cgroup.Close() + }() + + cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program) + if err == nil { + return cg, nil + } + + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti) + } + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride) + } + if err != nil { + return nil, err + } + + return cg, nil +} + +type progAttachCgroup struct { + cgroup *os.File + current *ebpf.Program + attachType ebpf.AttachType + flags cgroupAttachFlags +} + +var _ Link = (*progAttachCgroup)(nil) + +func (cg *progAttachCgroup) isLink() {} + +// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH. +// cgroup and prog are retained by [progAttachCgroup]. +func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { + if flags&flagAllowMulti > 0 { + if err := haveProgAttachReplace(); err != nil { + return nil, fmt.Errorf("can't support multiple programs: %w", err) + } + } + + // Use a program handle that cannot be closed by the caller. 
+ clone, err := prog.Clone() + if err != nil { + return nil, err + } + + err = RawAttachProgram(RawAttachProgramOptions{ + Target: int(cgroup.Fd()), + Program: clone, + Flags: uint32(flags), + Attach: attach, + }) + if err != nil { + clone.Close() + return nil, fmt.Errorf("cgroup: %w", err) + } + + return &progAttachCgroup{cgroup, clone, attach, flags}, nil +} + +func (cg *progAttachCgroup) Close() error { + defer cg.cgroup.Close() + defer cg.current.Close() + + err := RawDetachProgram(RawDetachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: cg.current, + Attach: cg.attachType, + }) + if err != nil { + return fmt.Errorf("close cgroup: %s", err) + } + return nil +} + +func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { + new, err := prog.Clone() + if err != nil { + return err + } + + args := RawAttachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: prog, + Attach: cg.attachType, + Flags: uint32(cg.flags), + } + + if cg.flags&flagAllowMulti > 0 { + // Atomically replacing multiple programs requires at least + // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf + // program in MULTI mode") + args.Anchor = ReplaceProgram(cg.current) + } + + if err := RawAttachProgram(args); err != nil { + new.Close() + return fmt.Errorf("can't update cgroup: %s", err) + } + + cg.current.Close() + cg.current = new + return nil +} + +func (cg *progAttachCgroup) Pin(string) error { + return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Unpin() error { + return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + +type linkCgroup struct { + RawLink +} + +var _ Link = (*linkCgroup)(nil) + +// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE. 
+func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { + link, err := AttachRawLink(RawLinkOptions{ + Target: int(cgroup.Fd()), + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &linkCgroup{*link}, err +} + +func (cg *linkCgroup) Info() (*Info, error) { + var info sys.CgroupLinkInfo + if err := sys.ObjInfo(cg.fd, &info); err != nil { + return nil, fmt.Errorf("cgroup link info: %s", err) + } + extra := &CgroupInfo{ + CgroupId: info.CgroupId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go new file mode 100644 index 0000000000..2bde35ed7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/doc.go @@ -0,0 +1,2 @@ +// Package link allows attaching eBPF programs to various kernel hooks. +package link diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go new file mode 100644 index 0000000000..0a39faef88 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/iter.go @@ -0,0 +1,84 @@ +package link + +import ( + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type IterOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceIter. The kind of iterator to attach to is + // determined at load time via the AttachTo field. + // + // AttachTo requires the kernel to include BTF of itself, + // and it to be compiled with a recent pahole (>= 1.16). + Program *ebpf.Program + + // Map specifies the target map for bpf_map_elem and sockmap iterators. + // It may be nil. + Map *ebpf.Map +} + +// AttachIter attaches a BPF seq_file iterator. 
+func AttachIter(opts IterOptions) (*Iter, error) { + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + var info bpfIterLinkInfoMap + if opts.Map != nil { + mapFd := opts.Map.FD() + if mapFd < 0 { + return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) + } + info.map_fd = uint32(mapFd) + } + + attr := sys.LinkCreateIterAttr{ + ProgFd: uint32(progFd), + AttachType: sys.AttachType(ebpf.AttachTraceIter), + IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfoLen: uint32(unsafe.Sizeof(info)), + } + + fd, err := sys.LinkCreateIter(&attr) + if err != nil { + if haveFeatErr := haveBPFLink(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("can't link iterator: %w", err) + } + + return &Iter{RawLink{fd, ""}}, err +} + +// Iter represents an attached bpf_iter. +type Iter struct { + RawLink +} + +// Open creates a new instance of the iterator. +// +// Reading from the returned reader triggers the BPF program. +func (it *Iter) Open() (io.ReadCloser, error) { + attr := &sys.IterCreateAttr{ + LinkFd: it.fd.Uint(), + } + + fd, err := sys.IterCreate(attr) + if err != nil { + return nil, fmt.Errorf("can't create iterator: %w", err) + } + + return fd.File("bpf_iter"), nil +} + +// union bpf_iter_link_info.map +type bpfIterLinkInfoMap struct { + map_fd uint32 +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go new file mode 100644 index 0000000000..fe3f17c371 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -0,0 +1,365 @@ +package link + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeOptions defines additional parameters that will be used +// when loading Kprobes. 
+type KprobeOptions struct {
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	//
+	// Needs kernel 5.15+.
+	Cookie uint64
+	// Offset of the kprobe relative to the traced symbol.
+	// Can be used to insert kprobes at arbitrary offsets in kernel functions,
+	// e.g. in places where functions have been inlined.
+	Offset uint64
+	// Increase the maximum number of concurrent invocations of a kretprobe.
+	// Required when tracing some long running functions in the kernel.
+	//
+	// Deprecated: this setting forces the use of an outdated kernel API and is not portable
+	// across kernel versions.
+	RetprobeMaxActive int
+	// Prefix used for the event name if the kprobe must be attached using tracefs.
+	// The group name will be formatted as `<prefix>_<randomstring>`.
+	// The default empty string is equivalent to "ebpf" as the prefix.
+	TraceFSPrefix string
+}
+
+func (ko *KprobeOptions) cookie() uint64 {
+	if ko == nil {
+		return 0
+	}
+	return ko.Cookie
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+//	kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// The returned Link may implement [PerfEvent].
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, false) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(k, prog, opts.cookie()) + if err != nil { + k.Close() + return nil, err + } + + return lnk, nil +} + +// Kretprobe attaches the given eBPF program to a perf event that fires right +// before the given kernel symbol exits, with the function stack left intact. +// See /proc/kallsyms for available symbols. For example, printk(): +// +// kp, err := Kretprobe("printk", prog, nil) +// +// Losing the reference to the resulting Link (kp) will close the Kretprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// If attaching to symbol fails, automatically retries with the running +// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls +// in a portable fashion. +// +// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol +// incorrectly returns unix.EINVAL instead of os.ErrNotExist. +// +// The returned Link may implement [PerfEvent]. +func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, true) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(k, prog, opts.cookie()) + if err != nil { + k.Close() + return nil, err + } + + return lnk, nil +} + +// isValidKprobeSymbol implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_.]*$". +func isValidKprobeSymbol(s string) bool { + if len(s) < 1 { + return false + } + + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + // Allow `.` in symbol name. GCC-compiled kernel may change symbol name + // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. 
+ // See: https://gcc.gnu.org/gcc-10/changes.html + case i > 0 && c == '.': + + default: + return false + } + } + + return true +} + +// kprobe opens a perf event on the given symbol and attaches prog to it. +// If ret is true, create a kretprobe. +func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { + if symbol == "" { + return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Pid: perfAllThreads, + Symbol: symbol, + Ret: ret, + } + + if opts != nil { + args.RetprobeMaxActive = opts.RetprobeMaxActive + args.Cookie = opts.Cookie + args.Offset = opts.Offset + args.Group = opts.TraceFSPrefix + } + + // Use kprobe PMU if the kernel has it available. + tp, err := pmuProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := internal.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = pmuProbe(args) + } + } + if err == nil { + return tp, nil + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err) + } + + // Use tracefs if kprobe PMU is missing. 
+ args.Symbol = symbol + tp, err = tracefsProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := internal.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = tracefsProbe(args) + } + } + if err != nil { + return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err) + } + + return tp, nil +} + +// pmuProbe opens a perf event based on a Performance Monitoring Unit. +// +// Requires at least a 4.17 kernel. +// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" +// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" +// +// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU +func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + // Getting the PMU type will fail if the kernel doesn't support + // the perf_[k,u]probe PMU. + eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type") + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported) + } + if err != nil { + return nil, err + } + + // Use tracefs if we want to set kretprobe's retprobeMaxActive. + if args.RetprobeMaxActive != 0 { + return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported) + } + + var config uint64 + if args.Ret { + bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe") + if err != nil { + return nil, err + } + config |= 1 << bit + } + + var ( + attr unix.PerfEventAttr + sp unsafe.Pointer + token string + ) + switch args.Type { + case tracefs.Kprobe: + // Create a pointer to a NUL-terminated string for the kernel. 
+ sp, err = unsafeStringPtr(args.Symbol) + if err != nil { + return nil, err + } + + token = tracefs.KprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.Offset, // Kernel symbol offset + Config: config, // Retprobe flag + } + case tracefs.Uprobe: + sp, err = unsafeStringPtr(args.Path) + if err != nil { + return nil, err + } + + if args.RefCtrOffset != 0 { + config |= args.RefCtrOffset << uprobeRefCtrOffsetShift + } + + token = tracefs.UprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. The Size field controls the + // size of the internal buffer the kernel allocates for reading the + // perf_event_attr argument from userspace. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Uprobe path + Ext2: args.Offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag + } + } + + cpu := 0 + if args.Pid != perfAllThreads { + cpu = -1 + } + rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. + // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") { + return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported) + } + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. 
+ if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, unix.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err) + } + + if err != nil { + return nil, fmt.Errorf("token %s: opening perf event: %w", token, err) + } + + // Ensure the string pointer is not collected before PerfEventOpen returns. + runtime.KeepAlive(sp) + + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + + // Kernel has perf_[k,u]probe PMU available, initialize perf event. + return newPerfEvent(fd, nil), nil +} + +// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. +// A new trace event group name is generated on every call to support creating +// multiple trace events for the same kernel or userspace symbol. +// Path and offset are only set in the case of uprobe(s) and are used to set +// the executable/library path on the filesystem and the offset where the probe is inserted. +// A perf event is then opened on the newly-created trace event and returned to the caller. +func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + groupPrefix := "ebpf" + if args.Group != "" { + groupPrefix = args.Group + } + + // Generate a random string for each trace event we attempt to create. + // This value is used as the 'group' token in tracefs to allow creating + // multiple kprobe trace events with the same name. 
+ group, err := tracefs.RandomGroup(groupPrefix) + if err != nil { + return nil, fmt.Errorf("randomizing group name: %w", err) + } + args.Group = group + + // Create the [k,u]probe trace event using tracefs. + evt, err := tracefs.NewEvent(args) + if err != nil { + return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) + } + + // Kprobes are ephemeral tracepoints and share the same perf event type. + fd, err := openTracepointPerfEvent(evt.ID(), args.Pid) + if err != nil { + // Make sure we clean up the created tracefs event when we return error. + // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = evt.Close() + return nil, err + } + + return newPerfEvent(fd, evt), nil +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go new file mode 100644 index 0000000000..f7a8291f94 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -0,0 +1,191 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeMultiOptions defines additional parameters that will be used +// when opening a KprobeMulti Link. +type KprobeMultiOptions struct { + // Symbols takes a list of kernel symbol names to attach an ebpf program to. + // + // Mutually exclusive with Addresses. + Symbols []string + + // Addresses takes a list of kernel symbol addresses in case they can not + // be referred to by name. + // + // Note that only start addresses can be specified, since the fprobe API + // limits the attach point to the function entry or return. + // + // Mutually exclusive with Symbols. 
+ Addresses []uintptr + + // Cookies specifies arbitrary values that can be fetched from an eBPF + // program via `bpf_get_attach_cookie()`. + // + // If set, its length should be equal to the length of Symbols or Addresses. + // Each Cookie is assigned to the Symbol or Address specified at the + // corresponding slice index. + Cookies []uint64 +} + +// KprobeMulti attaches the given eBPF program to the entry point of a given set +// of kernel symbols. +// +// The difference with Kprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. +func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, 0) +} + +// KretprobeMulti attaches the given eBPF program to the return point of a given +// set of kernel symbols. +// +// The difference with Kretprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. 
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN) +} + +func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + syms := uint32(len(opts.Symbols)) + addrs := uint32(len(opts.Addresses)) + cookies := uint32(len(opts.Cookies)) + + if syms == 0 && addrs == 0 { + return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput) + } + if syms != 0 && addrs != 0 { + return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput) + } + if cookies > 0 && cookies != syms && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + KprobeMultiFlags: flags, + } + + switch { + case syms != 0: + attr.Count = syms + attr.Syms = sys.NewStringSlicePointer(opts.Symbols) + + case addrs != 0: + attr.Count = addrs + attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0])) + } + + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateKprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &kprobeMultiLink{RawLink{fd, ""}}, nil +} + +type kprobeMultiLink struct { + RawLink +} + +var _ Link = (*kprobeMultiLink)(nil) + +func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error { + return 
fmt.Errorf("update kprobe_multi: %w", ErrNotSupported) +} + +func (kml *kprobeMultiLink) Info() (*Info, error) { + var info sys.KprobeMultiLinkInfo + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("kprobe multi link info: %s", err) + } + extra := &KprobeMultiInfo{ + count: info.Count, + flags: info.Flags, + missed: info.Missed, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kpm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}) diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go new file mode 100644 index 0000000000..9c34616c9a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -0,0 +1,530 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +var ErrNotSupported = internal.ErrNotSupported + +// Link represents a Program attached to a BPF hook. 
+type Link interface { + // Replace the current program with a new program. + // + // Passing a nil program is an error. May return an error wrapping ErrNotSupported. + Update(*ebpf.Program) error + + // Persist a link by pinning it into a bpffs. + // + // May return an error wrapping ErrNotSupported. + Pin(string) error + + // Undo a previous call to Pin. + // + // May return an error wrapping ErrNotSupported. + Unpin() error + + // Close frees resources. + // + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is + // not called. + Close() error + + // Info returns metadata on a link. + // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + + // Prevent external users from implementing this interface. + isLink() +} + +// NewLinkFromFD creates a link from a raw fd. +// +// Deprecated: use [NewFromFD] instead. +func NewLinkFromFD(fd int) (Link, error) { + return NewFromFD(fd) +} + +// NewFromFD creates a link from a raw fd. +// +// You should not use fd after calling this function. +func NewFromFD(fd int) (Link, error) { + sysFD, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return wrapRawLink(&RawLink{fd: sysFD}) +} + +// NewFromID returns the link associated with the given id. +// +// Returns ErrNotExist if there is no link with the given id. +func NewFromID(id ID) (Link, error) { + getFdAttr := &sys.LinkGetFdByIdAttr{Id: id} + fd, err := sys.LinkGetFdById(getFdAttr) + if err != nil { + return nil, fmt.Errorf("get link fd from ID %d: %w", id, err) + } + + return wrapRawLink(&RawLink{fd, ""}) +} + +// LoadPinnedLink loads a link that was persisted into a bpffs. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. 
+// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (_ Link, err error) { + defer func() { + if err != nil { + raw.Close() + } + }() + + info, err := raw.Info() + if err != nil { + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + case KprobeMultiType: + return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil + case PerfEventType: + return &perfEventLink{*raw, nil}, nil + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil + case NetkitType: + return &netkitLink{*raw}, nil + case XDPType: + return &xdpLink{*raw}, nil + default: + return raw, nil + } +} + +// ID uniquely identifies a BPF link. +type ID = sys.LinkID + +// RawLinkOptions control the creation of a raw link. +type RawLinkOptions struct { + // File descriptor to attach to. This differs for each attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 +} + +// Info contains metadata on a link. 
+type Info struct { + Type Type + ID ID + Program ebpf.ProgramID + extra interface{} +} + +type TracingInfo struct { + AttachType sys.AttachType + TargetObjId uint32 + TargetBtfId sys.TypeID +} + +type CgroupInfo struct { + CgroupId uint64 + AttachType sys.AttachType + _ [4]byte +} + +type NetNsInfo struct { + NetnsIno uint32 + AttachType sys.AttachType +} + +type TCXInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type XDPInfo struct { + Ifindex uint32 +} + +type NetfilterInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + +type NetkitInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type KprobeMultiInfo struct { + count uint32 + flags uint32 + missed uint64 +} + +// AddressCount is the number of addresses hooked by the kprobe. +func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { + return kpm.count, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { + return kpm.flags, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { + return kpm.missed, kpm.count > 0 +} + +type PerfEventInfo struct { + Type sys.PerfEventType + extra interface{} +} + +func (r *PerfEventInfo) Kprobe() *KprobeInfo { + e, _ := r.extra.(*KprobeInfo) + return e +} + +type KprobeInfo struct { + address uint64 + missed uint64 +} + +func (kp *KprobeInfo) Address() (uint64, bool) { + return kp.address, kp.address > 0 +} + +func (kp *KprobeInfo) Missed() (uint64, bool) { + return kp.missed, kp.address > 0 +} + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. 
+// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// XDP returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e +} + +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + +// Netkit returns netkit type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netkit() *NetkitInfo { + e, _ := r.extra.(*NetkitInfo) + return e +} + +// KprobeMulti returns kprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) KprobeMulti() *KprobeMultiInfo { + e, _ := r.extra.(*KprobeMultiInfo) + return e +} + +// PerfEvent returns perf-event type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) PerfEvent() *PerfEventInfo { + e, _ := r.extra.(*PerfEventInfo) + return e +} + +// RawLink is the low-level API to bpf_link. +// +// You should consider using the higher level interfaces in this +// package instead. +type RawLink struct { + fd *sys.FD + pinnedPath string +} + +// AttachRawLink creates a raw link. 
+func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: opts.BTF, + Flags: opts.Flags, + } + fd, err := sys.LinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("create link: %w", err) + } + + return &RawLink{fd, ""}, nil +} + +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, fmt.Errorf("load pinned link: %w", err) + } + + return &RawLink{fd, fileName}, nil +} + +func (l *RawLink) isLink() {} + +// FD returns the raw file descriptor. +func (l *RawLink) FD() int { + return l.fd.Int() +} + +// Close breaks the link. +// +// Use Pin if you want to make the link persistent. +func (l *RawLink) Close() error { + return l.fd.Close() +} + +// Pin persists a link past the lifetime of the process. +// +// Calling Close on a pinned Link will not break the link +// until the pin is removed. +func (l *RawLink) Pin(fileName string) error { + if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { + return err + } + l.pinnedPath = fileName + return nil +} + +// Unpin implements the Link interface. +func (l *RawLink) Unpin() error { + if err := internal.Unpin(l.pinnedPath); err != nil { + return err + } + l.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Link has a non-empty pinned path. +func (l *RawLink) IsPinned() bool { + return l.pinnedPath != "" +} + +// Update implements the Link interface. 
+func (l *RawLink) Update(new *ebpf.Program) error { + return l.UpdateArgs(RawLinkUpdateOptions{ + New: new, + }) +} + +// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs. +type RawLinkUpdateOptions struct { + New *ebpf.Program + Old *ebpf.Program + Flags uint32 +} + +// UpdateArgs updates a link based on args. +func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { + newFd := opts.New.FD() + if newFd < 0 { + return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + var oldFd int + if opts.Old != nil { + oldFd = opts.Old.FD() + if oldFd < 0 { + return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) + } + } + + attr := sys.LinkUpdateAttr{ + LinkFd: l.fd.Uint(), + NewProgFd: uint32(newFd), + OldProgFd: uint32(oldFd), + Flags: opts.Flags, + } + return sys.LinkUpdate(&attr) +} + +// Info returns metadata about the link. +// +// Linktype specific metadata is not included and can be retrieved +// via the linktype specific Info() method. +func (l *RawLink) Info() (*Info, error) { + var info sys.LinkInfo + + if err := sys.ObjInfo(l.fd, &info); err != nil { + return nil, fmt.Errorf("link info: %s", err) + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + nil, + }, nil +} + +// Iterator allows iterating over links attached into the kernel. +type Iterator struct { + // The ID of the current link. Only valid after a call to Next + ID ID + // The current link. Only valid until a call to Next. + // See Take if you want to retain the link. + Link Link + err error +} + +// Next retrieves the next link. +// +// Returns true if another link was found. Call [Iterator.Err] after the function returns false. +func (it *Iterator) Next() bool { + id := it.ID + for { + getIdAttr := &sys.LinkGetNextIdAttr{Id: id} + err := sys.LinkGetNextId(getIdAttr) + if errors.Is(err, os.ErrNotExist) { + // There are no more links. 
+ break + } else if err != nil { + it.err = fmt.Errorf("get next link ID: %w", err) + break + } + + id = getIdAttr.NextId + l, err := NewFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Couldn't load the link fast enough. Try next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("get link for ID %d: %w", id, err) + break + } + + if it.Link != nil { + it.Link.Close() + } + it.ID, it.Link = id, l + return true + } + + // No more links or we encountered an error. + if it.Link != nil { + it.Link.Close() + } + it.Link = nil + return false +} + +// Take the ownership of the current link. +// +// It's the callers responsibility to close the link. +func (it *Iterator) Take() Link { + l := it.Link + it.Link = nil + return l +} + +// Err returns an error if iteration failed for some reason. +func (it *Iterator) Err() error { + return it.err +} + +func (it *Iterator) Close() { + if it.Link != nil { + it.Link.Close() + } +} diff --git a/vendor/github.com/cilium/ebpf/link/netfilter.go b/vendor/github.com/cilium/ebpf/link/netfilter.go new file mode 100644 index 0000000000..34be390859 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netfilter.go @@ -0,0 +1,90 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation + +type NetfilterAttachFlags uint32 + +type NetfilterOptions struct { + // Program must be a netfilter BPF program. + Program *ebpf.Program + // The protocol family. + ProtocolFamily uint32 + // The number of the hook you are interested in. + HookNumber uint32 + // Priority within hook + Priority int32 + // Extra link flags + Flags uint32 + // Netfilter flags + NetfilterFlags NetfilterAttachFlags +} + +type netfilterLink struct { + RawLink +} + +// AttachNetfilter links a netfilter BPF program to a netfilter hook. 
+func AttachNetfilter(opts NetfilterOptions) (Link, error) { + if opts.Program == nil { + return nil, fmt.Errorf("netfilter program is nil") + } + + if t := opts.Program.Type(); t != ebpf.Netfilter { + return nil, fmt.Errorf("invalid program type %s, expected netfilter", t) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateNetfilterAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.BPF_NETFILTER, + Flags: opts.Flags, + Pf: uint32(opts.ProtocolFamily), + Hooknum: uint32(opts.HookNumber), + Priority: opts.Priority, + NetfilterFlags: uint32(opts.NetfilterFlags), + } + + fd, err := sys.LinkCreateNetfilter(&attr) + if err != nil { + return nil, fmt.Errorf("attach netfilter link: %w", err) + } + + return &netfilterLink{RawLink{fd, ""}}, nil +} + +func (*netfilterLink) Update(new *ebpf.Program) error { + return fmt.Errorf("netfilter update: %w", ErrNotSupported) +} + +func (nf *netfilterLink) Info() (*Info, error) { + var info sys.NetfilterLinkInfo + if err := sys.ObjInfo(nf.fd, &info); err != nil { + return nil, fmt.Errorf("netfilter link info: %s", err) + } + extra := &NetfilterInfo{ + Pf: info.Pf, + Hooknum: info.Hooknum, + Priority: info.Priority, + Flags: info.Flags, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var _ Link = (*netfilterLink)(nil) diff --git a/vendor/github.com/cilium/ebpf/link/netkit.go b/vendor/github.com/cilium/ebpf/link/netkit.go new file mode 100644 index 0000000000..5eee3b023a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netkit.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type NetkitOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachNetkit* constants. 
+	Attach ebpf.AttachType
+	// Attach relative to an anchor. Optional.
+	Anchor Anchor
+	// Only attach if the expected revision matches.
+	ExpectedRevision uint64
+	// Flags control the attach behaviour. Specify an Anchor instead of
+	// F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
+	Flags uint32
+}
+
+func AttachNetkit(opts NetkitOptions) (Link, error) {
+	if opts.Interface < 0 {
+		return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
+	}
+
+	if opts.Flags&anchorFlags != 0 {
+		return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+	}
+
+	attr := sys.LinkCreateNetkitAttr{
+		ProgFd:           uint32(opts.Program.FD()),
+		AttachType:       sys.AttachType(opts.Attach),
+		TargetIfindex:    uint32(opts.Interface),
+		ExpectedRevision: opts.ExpectedRevision,
+		Flags:            opts.Flags,
+	}
+
+	if opts.Anchor != nil {
+		fdOrID, flags, err := opts.Anchor.anchor()
+		if err != nil {
+			return nil, fmt.Errorf("attach netkit link: %w", err)
+		}
+
+		attr.RelativeFdOrId = fdOrID
+		attr.Flags |= flags
+	}
+
+	fd, err := sys.LinkCreateNetkit(&attr)
+	runtime.KeepAlive(opts.Program)
+	runtime.KeepAlive(opts.Anchor)
+	if err != nil {
+		if haveFeatErr := haveNetkit(); haveFeatErr != nil {
+			return nil, haveFeatErr
+		}
+		return nil, fmt.Errorf("attach netkit link: %w", err)
+	}
+
+	return &netkitLink{RawLink{fd, ""}}, nil
+}
+
+type netkitLink struct {
+	RawLink
+}
+
+var _ Link = (*netkitLink)(nil)
+
+func (netkit *netkitLink) Info() (*Info, error) {
+	var info sys.NetkitLinkInfo
+	if err := sys.ObjInfo(netkit.fd, &info); err != nil {
+		return nil, fmt.Errorf("netkit link info: %s", err)
+	}
+	extra := &NetkitInfo{
+		Ifindex:    info.Ifindex,
+		AttachType: info.AttachType,
+	}
+
+	return &Info{
+		info.Type,
+		info.Id,
+		ebpf.ProgramID(info.ProgId),
+		extra,
+	}, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go
new file mode 100644
index 0000000000..b1edd340a3
--- /dev/null
+++ 
b/vendor/github.com/cilium/ebpf/link/netns.go @@ -0,0 +1,55 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// NetNsLink is a program attached to a network namespace. +type NetNsLink struct { + RawLink +} + +// AttachNetNs attaches a program to a network namespace. +func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { + var attach ebpf.AttachType + switch t := prog.Type(); t { + case ebpf.FlowDissector: + attach = ebpf.AttachFlowDissector + case ebpf.SkLookup: + attach = ebpf.AttachSkLookup + default: + return nil, fmt.Errorf("can't attach %v to network namespace", t) + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: ns, + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &NetNsLink{*link}, nil +} + +func (ns *NetNsLink) Info() (*Info, error) { + var info sys.NetNsLinkInfo + if err := sys.ObjInfo(ns.fd, &info); err != nil { + return nil, fmt.Errorf("netns link info: %s", err) + } + extra := &NetNsInfo{ + NetnsIno: info.NetnsIno, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go new file mode 100644 index 0000000000..1d8feb58c1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -0,0 +1,332 @@ +package link + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +// Getting the terminology right is usually the hardest part. For posterity and +// for staying sane during implementation: +// +// - trace event: Representation of a kernel runtime hook. Filesystem entries +// under /events. 
Can be tracepoints (static), kprobes or uprobes.
+//   Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+//   events in (sub)directories under /events. Cannot be closed or
+//   removed, they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+//   exported kernel symbols. kprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the /kprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
+//   and offsets. uprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the /uprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+//   kernel symbol. Referred to by fd in userspace.
+//   Exactly one eBPF program can be attached to a perf event. Multiple perf
+//   events can be created from a single trace event. Closing a perf event
+//   stops any further invocations of the attached eBPF program.
+
+var (
+	errInvalidInput = tracefs.ErrInvalidInput
+)
+
+const (
+	perfAllThreads = -1
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+	// Trace event backing this perfEvent. May be nil.
+	tracefsEvent *tracefs.Event
+
+	// This is the perf event FD.
+	fd *sys.FD
+}
+
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+	pe := &perfEvent{event, fd}
+	// Both event and fd have their own finalizer, but we want to
+	// guarantee that they are closed in a certain order.
+ runtime.SetFinalizer(pe, (*perfEvent).Close) + return pe +} + +func (pe *perfEvent) Close() error { + runtime.SetFinalizer(pe, nil) + + if err := pe.fd.Close(); err != nil { + return fmt.Errorf("closing perf event fd: %w", err) + } + + if pe.tracefsEvent != nil { + return pe.tracefsEvent.Close() + } + + return nil +} + +// PerfEvent is implemented by some Link types which use a perf event under +// the hood. +type PerfEvent interface { + // PerfEvent returns a file for the underlying perf event. + // + // It is the callers responsibility to close the returned file. + // + // Making changes to the associated perf event lead to + // undefined behaviour. + PerfEvent() (*os.File, error) +} + +// perfEventLink represents a bpf perf link. +type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +func (pl *perfEventLink) Close() error { + if err := pl.fd.Close(); err != nil { + return fmt.Errorf("perf link close: %w", err) + } + + // when created from pinned link + if pl.pe == nil { + return nil + } + + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event close: %w", err) + } + return nil +} + +func (pl *perfEventLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventLink)(nil) + +func (pl *perfEventLink) PerfEvent() (*os.File, error) { + // when created from pinned link + if pl.pe == nil { + return nil, ErrNotSupported + } + + fd, err := pl.pe.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +func (pl *perfEventLink) Info() (*Info, error) { + var info sys.PerfEventLinkInfo + if err := sys.ObjInfo(pl.fd, &info); err != nil { + return nil, fmt.Errorf("perf event link info: %s", err) + } + + var extra2 interface{} + switch info.PerfEventType { + case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE: + var kprobeInfo sys.KprobeLinkInfo + if err := sys.ObjInfo(pl.fd, 
&kprobeInfo); err != nil { + return nil, fmt.Errorf("kprobe link info: %s", err) + } + extra2 = &KprobeInfo{ + address: kprobeInfo.Addr, + missed: kprobeInfo.Missed, + } + } + + extra := &PerfEventInfo{ + Type: info.PerfEventType, + extra: extra2, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). +type perfEventIoctl struct { + *perfEvent +} + +func (pi *perfEventIoctl) isLink() {} + +// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), +// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array +// owned by the perf event, which means multiple programs can be attached +// simultaneously. +// +// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event +// returns EEXIST. +// +// Detaching a program from a perf event is currently not possible, so a +// program replacement mechanism cannot be implemented for perf events. +func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventIoctl)(nil) + +func (pi *perfEventIoctl) PerfEvent() (*os.File, error) { + fd, err := pi.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +// attach the given eBPF prog to the perf event stored in pe. +// pe must contain a valid perf event fd. +// prog's type must match the program type stored in pe. 
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + if prog.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog, cookie) + } + + if cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } + + return attachPerfEventIoctl(pe, prog) +} + +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { + // Assign the eBPF program to the perf event. + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + if err != nil { + return nil, fmt.Errorf("setting perf event bpf program: %w", err) + } + + // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) + } + + return &perfEventIoctl{pe}, nil +} + +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: cookie, + }) + if err != nil { + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) + } + + return &perfEventLink{RawLink{fd: fd}, pe}, nil +} + +// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. +func unsafeStringPtr(str string) (unsafe.Pointer, error) { + p, err := unix.BytePtrFromString(str) + if err != nil { + return nil, err + } + return unsafe.Pointer(p), nil +} + +// openTracepointPerfEvent opens a tracepoint-type perf event. 
System-wide +// [k,u]probes created by writing to /[k,u]probe_events are tracepoints +// behind the scenes, and can be attached to using these perf events. +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { + attr := unix.PerfEventAttr{ + Type: unix.PERF_TYPE_TRACEPOINT, + Config: tid, + Sample_type: unix.PERF_SAMPLE_RAW, + Sample: 1, + Wakeup: 1, + } + + cpu := 0 + if pid != perfAllThreads { + cpu = -1 + } + fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("opening tracepoint perf event: %w", err) + } + + return sys.NewFD(fd) +} + +// Probe BPF perf link. +// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + return err + } + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go new file mode 100644 index 0000000000..d8a2a15f93 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -0,0 +1,107 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawAttachProgramOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. 
+ Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional. + Flags uint32 + // Only attach if the internal revision matches the given value. + ExpectedRevision uint64 +} + +// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. +func RawAttachProgram(opts RawAttachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.ProgAttachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("attach program: %w", err) + } + + if flags == sys.BPF_F_REPLACE { + // Ensure that replacing a program works on old kernels. + attr.ReplaceBpfFd = fdOrID + } else { + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + } + + if err := sys.ProgAttach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("attach program: %w", err) + } + runtime.KeepAlive(opts.Program) + + return nil +} + +type RawDetachProgramOptions RawAttachProgramOptions + +// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. 
+func RawDetachProgram(opts RawDetachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.ProgDetachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("detach program: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + + if err := sys.ProgDetach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("can't detach program: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go new file mode 100644 index 0000000000..fe534f8efa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/query.go @@ -0,0 +1,111 @@ +package link + +import ( + "fmt" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// QueryOptions defines additional parameters when querying for programs. +type QueryOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int + // Attach specifies the AttachType of the programs queried for + Attach ebpf.AttachType + // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE + QueryFlags uint32 +} + +// QueryResult describes which programs and links are active. +type QueryResult struct { + // List of attached programs. + Programs []AttachedProgram + + // Incremented by one every time the set of attached programs changes. + // May be zero if not supported by the [ebpf.AttachType]. + Revision uint64 +} + +// HaveLinkInfo returns true if the kernel supports querying link information +// for a particular [ebpf.AttachType]. 
+func (qr *QueryResult) HaveLinkInfo() bool {
+	return qr.Revision > 0
+}
+
+type AttachedProgram struct {
+	ID     ebpf.ProgramID
+	linkID ID
+}
+
+// LinkID returns the ID associated with the program.
+//
+// Returns 0, false if the kernel doesn't support retrieving the ID or if the
+// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you
+// need to tell the two apart.
+func (ap *AttachedProgram) LinkID() (ID, bool) {
+	return ap.linkID, ap.linkID != 0
+}
+
+// QueryPrograms retrieves a list of programs for the given AttachType.
+//
+// Returns a slice of attached programs, which may be empty.
+// revision counts how many times the set of attached programs has changed and
+// may be zero if not supported by the [ebpf.AttachType].
+// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY.
+func QueryPrograms(opts QueryOptions) (*QueryResult, error) {
+	// First query only the count of attached programs, so the slices
+	// allocated below can be sized correctly.
+	attr := sys.ProgQueryAttr{
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachType:        sys.AttachType(opts.Attach),
+		QueryFlags:        opts.QueryFlags,
+	}
+	err := sys.ProgQuery(&attr)
+	if err != nil {
+		if haveFeatErr := haveProgQuery(); haveFeatErr != nil {
+			return nil, fmt.Errorf("query programs: %w", haveFeatErr)
+		}
+		return nil, fmt.Errorf("query programs: %w", err)
+	}
+	if attr.Count == 0 {
+		return &QueryResult{Revision: attr.Revision}, nil
+	}
+
+	// The minimum bpf_mprog revision is 1, so we can use the field to detect
+	// whether the attach type supports link ids.
+ haveLinkIDs := attr.Revision != 0 + + count := attr.Count + progIds := make([]ebpf.ProgramID, count) + attr = sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + Count: count, + ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])), + } + + var linkIds []ID + if haveLinkIDs { + linkIds = make([]ID, count) + attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0])) + } + + if err := sys.ProgQuery(&attr); err != nil { + return nil, fmt.Errorf("query programs: %w", err) + } + + // NB: attr.Count might have changed between the two syscalls. + var programs []AttachedProgram + for i, id := range progIds[:attr.Count] { + ap := AttachedProgram{ID: id} + if haveLinkIDs { + ap.linkID = linkIds[i] + } + programs = append(programs, ap) + } + + return &QueryResult{programs, attr.Revision}, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go new file mode 100644 index 0000000000..925e621cbb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -0,0 +1,87 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawTracepointOptions struct { + // Tracepoint name. + Name string + // Program must be of type RawTracepoint* + Program *ebpf.Program +} + +// AttachRawTracepoint links a BPF program to a raw_tracepoint. +// +// Requires at least Linux 4.17. 
+func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable { + return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) + } + if opts.Program.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), + }) + if err != nil { + return nil, err + } + + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. + return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil +} + +type simpleRawTracepoint struct { + fd *sys.FD +} + +var _ Link = (*simpleRawTracepoint)(nil) + +func (frt *simpleRawTracepoint) isLink() {} + +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() +} + +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Unpin() error { + return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 0000000000..84f0b656f8 --- /dev/null 
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. +func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. +func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go new file mode 100644 index 0000000000..d09b5acb0f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -0,0 +1,200 @@ +package link + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Type is the kind of link. +type Type = sys.LinkType + +// Valid link types. 
+const ( + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT + KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI + TCXType = sys.BPF_LINK_TYPE_TCX + UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI + NetfilterType = sys.BPF_LINK_TYPE_NETFILTER + NetkitType = sys.BPF_LINK_TYPE_NETKIT +) + +var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + if err != nil { + return internal.ErrNotSupported + } + + // BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB, + // so being able to load the program is enough to infer that we + // have the syscall. + prog.Close() + return nil +}) + +var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error { + if err := haveProgAttach(); err != nil { + return err + } + + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + + // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. + // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't + // present. + attr := sys.ProgAttachAttr{ + // We rely on this being checked after attachFlags. 
+ TargetFdOrIfindex: ^uint32(0), + AttachBpfFd: uint32(prog.FD()), + AttachType: uint32(ebpf.AttachCGroupInetIngress), + AttachFlags: uint32(flagReplace), + } + + err = sys.ProgAttach(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error { + attr := sys.LinkCreateAttr{ + // This is a hopefully invalid file descriptor, which triggers EBADF. + TargetFd: ^uint32(0), + ProgFd: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + _, err := sys.LinkCreate(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error { + attr := sys.ProgQueryAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect EBADF here + // as an indication that the feature is present. + TargetFdOrIfindex: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + + err := sys.ProgQuery(&attr) + + if errors.Is(err, unix.EBADF) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) + +var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateTcxAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. 
+ TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachTCXIngress), + } + + _, err = sys.LinkCreateTcx(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) + +var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateNetkitAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachNetkitPrimary), + } + + _, err = sys.LinkCreateNetkit(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) diff --git a/vendor/github.com/cilium/ebpf/link/tcx.go b/vendor/github.com/cilium/ebpf/link/tcx.go new file mode 100644 index 0000000000..ac045b71da --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tcx.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type TCXOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachTCX* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachTCX(opts TCXOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateTcxAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateTcx(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveTCX(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + return &tcxLink{RawLink{fd, ""}}, nil +} + +type tcxLink struct { + RawLink +} + +var _ Link = (*tcxLink)(nil) + +func (tcx *tcxLink) Info() (*Info, error) { + var info sys.TcxLinkInfo + if err := sys.ObjInfo(tcx.fd, &info); err != nil { + return nil, fmt.Errorf("tcx link info: %s", err) + } + extra := &TCXInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go new file mode 100644 index 0000000000..6fc78b9828 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -0,0 +1,70 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/tracefs" +) + +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. 
+type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + +// Tracepoint attaches the given eBPF program to the tracepoint with the given +// group and name. See /sys/kernel/tracing/events to find available +// tracepoints. The top-level directory is the group, the event's subdirectory +// is the name. Example: +// +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is +// only possible as of kernel 4.14 (commit cf5f5ce). +// +// The returned Link may implement [PerfEvent]. +func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { + if group == "" || name == "" { + return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.TracePoint { + return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) + } + + tid, err := tracefs.EventID(group, name) + if err != nil { + return nil, err + } + + fd, err := openTracepointPerfEvent(tid, perfAllThreads) + if err != nil { + return nil, err + } + + var cookie uint64 + if opts != nil { + cookie = opts.Cookie + } + + pe := newPerfEvent(fd, nil) + + lnk, err := attachPerfEvent(pe, prog, cookie) + if err != nil { + pe.Close() + return nil, err + } + + return lnk, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 0000000000..9e570afc96 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracing.go @@ 
-0,0 +1,218 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(new *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +func (f *tracing) Info() (*Info, error) { + var info sys.TracingLinkInfo + if err := sys.ObjInfo(f.fd, &info); err != nil { + return nil, fmt.Errorf("tracing link info: %s", err) + } + extra := &TracingInfo{ + TargetObjId: info.TargetObjId, + TargetBtfId: info.TargetBtfId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. 
+// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + btfHandle, err := targetProg.Handle() + if err != nil { + return nil, err + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return nil, err + } + + var function *btf.Func + if err := spec.TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. + return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) + } + if err != nil { + return nil, err + } + + return &tracing{*link}, nil +} + +type TracingOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or + // AttachTraceRawTp. + Program *ebpf.Program + // Program attach type. Can be one of: + // - AttachTraceFEntry + // - AttachTraceFExit + // - AttachModifyReturn + // - AttachTraceRawTp + // This field is optional. + AttachType ebpf.AttachType + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. 
+ Cookie uint64 +} + +type LSMOptions struct { + // Program must be of type LSM with attach type + // AttachLSMMac. + Program *ebpf.Program + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + Cookie uint64 +} + +// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. +func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) { + if program.FD() < 0 { + return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) + } + + var ( + fd *sys.FD + err error + ) + switch at { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp, + ebpf.AttachModifyReturn, ebpf.AttachLSMMac: + // Attach via BPF link + fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{ + ProgFd: uint32(program.FD()), + AttachType: sys.AttachType(at), + Cookie: cookie, + }) + if err == nil { + break + } + if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("create tracing link: %w", err) + } + fallthrough + case ebpf.AttachNone: + // Attach via RawTracepointOpen + if cookie > 0 { + return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported) + } + + fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. + return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("create raw tracepoint: %w", err) + } + default: + return nil, fmt.Errorf("invalid attach type: %s", at.String()) + } + + raw := RawLink{fd: fd} + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + if info.Type == RawTracepointType { + // Sadness upon sadness: a Tracing program with AttachRawTp returns + // a raw_tracepoint link. Other types return a tracing link. 
+ return &rawTracepoint{raw}, nil + } + return &tracing{raw}, nil +} + +// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or +// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined +// in kernel modules. +func AttachTracing(opts TracingOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.Tracing { + return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) + } + + switch opts.AttachType { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn, + ebpf.AttachTraceRawTp, ebpf.AttachNone: + default: + return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String()) + } + + return attachBTFID(opts.Program, opts.AttachType, opts.Cookie) +} + +// AttachLSM links a Linux security module (LSM) BPF Program to a BPF +// hook defined in kernel modules. +func AttachLSM(opts LSMOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.LSM { + return nil, fmt.Errorf("invalid program type %s, expected LSM", t) + } + + return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie) +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go new file mode 100644 index 0000000000..194d1d319a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -0,0 +1,335 @@ +package link + +import ( + "debug/elf" + "errors" + "fmt" + "os" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/tracefs" +) + +var ( + uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 + uprobeRefCtrOffsetShift = 32 + haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error { + _, err := os.Stat(uprobeRefCtrOffsetPMUPath) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + return nil + }) + + // 
ErrNoSymbol indicates that the given symbol was not found + // in the ELF symbols table. + ErrNoSymbol = errors.New("not found") +) + +// Executable defines an executable program on the filesystem. +type Executable struct { + // Path of the executable on the filesystem. + path string + // Parsed ELF and dynamic symbols' cachedAddresses. + cachedAddresses map[string]uint64 + // Keep track of symbol table lazy load. + cacheAddressesOnce sync.Once +} + +// UprobeOptions defines additional parameters that will be used +// when loading Uprobes. +type UprobeOptions struct { + // Symbol address. Must be provided in case of external symbols (shared libs). + // If set, overrides the address eventually parsed from the executable. + Address uint64 + // The offset relative to given symbol. Useful when tracing an arbitrary point + // inside the frame of given symbol. + // + // Note: this field changed from being an absolute offset to being relative + // to Address. + Offset uint64 + // Only set the uprobe on the given process ID. Useful when tracing + // shared library calls or programs that have many running instances. + PID int + // Automatically manage SDT reference counts (semaphores). + // + // If this field is set, the Kernel will increment/decrement the + // semaphore located in the process memory at the provided address on + // probe attach/detach. + // + // See also: + // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) + // github.com/torvalds/linux/commit/1cc33161a83d + // github.com/torvalds/linux/commit/a6ca88b241d5 + RefCtrOffset uint64 + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Prefix used for the event name if the uprobe must be attached using tracefs. + // The group name will be formatted as `_`. + // The default empty string is equivalent to "ebpf" as the prefix. 
+ TraceFSPrefix string +} + +func (uo *UprobeOptions) cookie() uint64 { + if uo == nil { + return 0 + } + return uo.Cookie +} + +// To open a new Executable, use: +// +// OpenExecutable("/bin/bash") +// +// The returned value can then be used to open Uprobe(s). +func OpenExecutable(path string) (*Executable, error) { + if path == "" { + return nil, fmt.Errorf("path cannot be empty") + } + + f, err := internal.OpenSafeELFFile(path) + if err != nil { + return nil, fmt.Errorf("parse ELF file: %w", err) + } + defer f.Close() + + if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN { + // ELF is not an executable or a shared object. + return nil, errors.New("the given file is not an executable or a shared object") + } + + return &Executable{ + path: path, + cachedAddresses: make(map[string]uint64), + }, nil +} + +func (ex *Executable) load(f *internal.SafeELFFile) error { + syms, err := f.Symbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + dynsyms, err := f.DynamicSymbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + syms = append(syms, dynsyms...) + + for _, s := range syms { + if elf.ST_TYPE(s.Info) != elf.STT_FUNC { + // Symbol not associated with a function or other executable code. + continue + } + + address := s.Value + + // Loop over ELF segments. + for _, prog := range f.Progs { + // Skip uninteresting segments. + if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 { + continue + } + + if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) { + // If the symbol value is contained in the segment, calculate + // the symbol offset. + // + // fn symbol offset = fn symbol VA - .text VA + .text offset + // + // stackoverflow.com/a/40249502 + address = s.Value - prog.Vaddr + prog.Off + break + } + } + + ex.cachedAddresses[s.Name] = address + } + + return nil +} + +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. 
+func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { + if address > 0 { + return address + offset, nil + } + + var err error + ex.cacheAddressesOnce.Do(func() { + var f *internal.SafeELFFile + f, err = internal.OpenSafeELFFile(ex.path) + if err != nil { + err = fmt.Errorf("parse ELF file: %w", err) + return + } + defer f.Close() + + err = ex.load(f) + }) + if err != nil { + return 0, fmt.Errorf("lazy load symbols: %w", err) + } + + address, ok := ex.cachedAddresses[symbol] + if !ok { + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + } + + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if address == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ + "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) + } + + return address + offset, nil +} + +// Uprobe attaches the given eBPF program to a perf event that fires when the +// given symbol starts executing in the given Executable. +// For example, /bin/bash::main(): +// +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uprobe("main", prog, nil) +// +// When using symbols which belongs to shared libraries, +// an offset must be provided via options: +// +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. 
+// +// The returned Link may implement [PerfEvent]. +func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { + u, err := ex.uprobe(symbol, prog, opts, false) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(u, prog, opts.cookie()) + if err != nil { + u.Close() + return nil, err + } + + return lnk, nil +} + +// Uretprobe attaches the given eBPF program to a perf event that fires right +// before the given symbol exits. For example, /bin/bash::main(): +// +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uretprobe("main", prog, nil) +// +// When using symbols which belongs to shared libraries, +// an offset must be provided via options: +// +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. +// +// The returned Link may implement [PerfEvent]. +func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { + u, err := ex.uprobe(symbol, prog, opts, true) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(u, prog, opts.cookie()) + if err != nil { + u.Close() + return nil, err + } + + return lnk, nil +} + +// uprobe opens a perf event for the given binary/symbol and attaches prog to it. +// If ret is true, create a uretprobe. 
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) { + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) + } + if opts == nil { + opts = &UprobeOptions{} + } + + offset, err := ex.address(symbol, opts.Address, opts.Offset) + if err != nil { + return nil, err + } + + pid := opts.PID + if pid == 0 { + pid = perfAllThreads + } + + if opts.RefCtrOffset != 0 { + if err := haveRefCtrOffsetPMU(); err != nil { + return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) + } + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Uprobe, + Symbol: symbol, + Path: ex.path, + Offset: offset, + Pid: pid, + RefCtrOffset: opts.RefCtrOffset, + Ret: ret, + Cookie: opts.Cookie, + Group: opts.TraceFSPrefix, + } + + // Use uprobe PMU if the kernel has it available. + tp, err := pmuProbe(args) + if err == nil { + return tp, nil + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) + } + + // Use tracefs if uprobe PMU is missing. + tp, err = tracefsProbe(args) + if err != nil { + return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) + } + + return tp, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go new file mode 100644 index 0000000000..aea807b329 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -0,0 +1,216 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// UprobeMultiOptions defines additional parameters that will be used +// when opening a UprobeMulti Link. 
+type UprobeMultiOptions struct { + // Symbol addresses. If set, overrides the addresses eventually parsed from + // the executable. Mutually exclusive with UprobeMulti's symbols argument. + Addresses []uint64 + + // Offsets into functions provided by UprobeMulti's symbols argument. + // For example: to set uprobes to main+5 and _start+10, call UprobeMulti + // with: + // symbols: "main", "_start" + // opt.Offsets: 5, 10 + Offsets []uint64 + + // Optional list of associated ref counter offsets. + RefCtrOffsets []uint64 + + // Optional list of associated BPF cookies. + Cookies []uint64 + + // Only set the uprobe_multi link on the given process ID, zero PID means + // system-wide. + PID uint32 +} + +func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + return ex.uprobeMulti(symbols, prog, opts, 0) +} + +func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + + // The return probe is not limited for symbols entry, so there's no special + // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets + // and opts.Addresses arrays follow the same logic as for entry uprobes. 
+ return ex.uprobeMulti(symbols, prog, opts, unix.BPF_F_UPROBE_MULTI_RETURN) +} + +func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + if opts == nil { + opts = &UprobeMultiOptions{} + } + + addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets) + if err != nil { + return nil, err + } + + addrs := len(addresses) + cookies := len(opts.Cookies) + refCtrOffsets := len(opts.RefCtrOffsets) + + if addrs == 0 { + return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput) + } + if refCtrOffsets > 0 && refCtrOffsets != addrs { + return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) + } + if cookies > 0 && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateUprobeMultiAttr{ + Path: sys.NewStringPointer(ex.path), + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + UprobeMultiFlags: flags, + Count: uint32(addrs), + Offsets: sys.NewPointer(unsafe.Pointer(&addresses[0])), + Pid: opts.PID, + } + + if refCtrOffsets != 0 { + attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0])) + } + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateUprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &uprobeMultiLink{RawLink{fd, ""}}, nil +} + +func (ex *Executable) addresses(symbols []string, addresses, offsets 
[]uint64) ([]uint64, error) { + n := len(symbols) + if n == 0 { + n = len(addresses) + } + + if n == 0 { + return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput) + } + + if symbols != nil && len(symbols) != n { + return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n) + } + + if addresses != nil && len(addresses) != n { + return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n) + } + + if offsets != nil && len(offsets) != n { + return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n) + } + + results := make([]uint64, 0, n) + for i := 0; i < n; i++ { + var sym string + if symbols != nil { + sym = symbols[i] + } + + var addr, off uint64 + if addresses != nil { + addr = addresses[i] + } + + if offsets != nil { + off = offsets[i] + } + + result, err := ex.address(sym, addr, off) + if err != nil { + return nil, err + } + + results = append(results, result) + } + + return results, nil +} + +type uprobeMultiLink struct { + RawLink +} + +var _ Link = (*uprobeMultiLink)(nil) + +func (kml *uprobeMultiLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) +} + +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6.6", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_upm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceUprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + // We try to create uprobe multi link on '/' path which results in + // error with -EBADF in case uprobe multi link is supported. 
+ fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + Path: sys.NewStringPointer("/"), + Offsets: sys.NewPointer(unsafe.Pointer(&[]uint64{0})), + Count: 1, + }) + switch { + case errors.Is(err, unix.EBADF): + return nil + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + case err != nil: + return err + } + + // should not happen + fd.Close() + return errors.New("successfully attached uprobe_multi to /, kernel bug?") +}) diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 0000000000..2ec441229a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,80 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// XDPAttachFlags represents how XDP program will be attached to interface. +type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links XDP BPF program into the driver’s receive path. + XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set, without flag defaults + // to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook. 
+func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + if err != nil { + return nil, fmt.Errorf("failed to attach link: %w", err) + } + + return &xdpLink{*rawLink}, nil +} + +type xdpLink struct { + RawLink +} + +func (xdp *xdpLink) Info() (*Info, error) { + var info sys.XDPLinkInfo + if err := sys.ObjInfo(xdp.fd, &info); err != nil { + return nil, fmt.Errorf("xdp link info: %s", err) + } + extra := &XDPInfo{ + Ifindex: info.Ifindex, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go new file mode 100644 index 0000000000..788f21b7b6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -0,0 +1,459 @@ +package ebpf + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "io/fs" + "math" + "slices" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +// handles stores handle objects to avoid gc cleanup +type handles []*btf.Handle + +func (hs *handles) add(h *btf.Handle) (int, error) { + if h == nil { + return 0, nil + } + + if len(*hs) == math.MaxInt16 { + return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16) + } + + *hs = append(*hs, h) + + // return length of slice so that indexes start at 1 + return len(*hs), nil +} + +func (hs handles) fdArray() []int32 { + // first element of fda is reserved as no module can be indexed with 0 + fda := []int32{0} + for _, h := range hs { + fda = append(fda, int32(h.FD())) + } + + return fda +} + +func (hs 
*handles) Close() error { + var errs []error + for _, h := range *hs { + errs = append(errs, h.Close()) + } + return errors.Join(errs...) +} + +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. +// +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } + + currentSym := insns[0].Symbol() + if currentSym == "" { + return nil, errors.New("insns must start with a Symbol") + } + + start := 0 + progs := make(map[string]asm.Instructions) + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue + } + + // New symbol, flush the old one out. + progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) + } + + return progs, nil +} + +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. +// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. 
+func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true + } + } + return false +} + +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// Passing a nil target will relocate against the running kernel. insns are +// modified in place. +func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + if bo == nil { + bo = internal.NativeEndian + } + + if len(targets) == 0 { + kernelTarget, err := btf.LoadKernelSpec() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) + } + targets = append(targets, kernelTarget) + + if kmodName != "" { + kmodTarget, err := btf.LoadKernelModuleSpec(kmodName) + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled. + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("load kernel module spec: %w", err) + } + if err == nil { + targets = append(targets, kmodTarget) + } + } + } + + fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) + if err != nil { + return err + } + + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("fixup for %s: %w", relos[i], err) + } + } + + return nil +} + +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. 
+ refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } + + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) + } + + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } +} + +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(refs[prog])) + copy(pending, refs[prog]) + + // All references for which we've appended instructions. + linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] + + if linked[ref] { + // We've already linked this ref, don't append instructions again. + continue + } + + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. + continue + } + + insns = append(insns, progRef.Instructions...) + linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) 
+ } + + return insns +} + +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + // Map load was tagged with a Reference, but does not contain a Map pointer. + needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil + if ins.IsLoadFromMap() && needsMap && ins.Map() == nil { + return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference) + } + + fixupProbeReadKernel(ins) + } + + return nil +} + +// POISON_CALL_KFUNC_BASE in libbpf. +// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767 +const kfuncCallPoisonBase = 2002000000 + +// fixupKfuncs loops over all instructions in search for kfunc calls. +// If at least one is found, the current kernels BTF and module BTFis are searched to set Instruction.Constant +// and Instruction.Offset to the correct values. +func fixupKfuncs(insns asm.Instructions) (_ handles, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil { + goto fixups + } + } + + return nil, nil + +fixups: + // only load the kernel spec if we found at least one kfunc call + kernelSpec, err := btf.LoadKernelSpec() + if err != nil { + return nil, err + } + + fdArray := make(handles, 0) + defer closeOnError(&fdArray) + + for { + ins := iter.Ins + + metadata := ins.Metadata.Get(kfuncMetaKey{}) + if metadata == nil { + if !iter.Next() { + // break loop if this was the last instruction in the stream. 
+ break + } + continue + } + + // check meta, if no meta return err + kfm, _ := metadata.(*kfuncMeta) + if kfm == nil { + return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta") + } + + target := btf.Type((*btf.Func)(nil)) + spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target) + if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) { + if ins.IsKfuncCall() { + // If the kfunc call is weak and not found, poison the call. Use a recognizable constant + // to make it easier to debug. And set src to zero so the verifier doesn't complain + // about the invalid imm/offset values before dead-code elimination. + ins.Constant = kfuncCallPoisonBase + ins.Src = 0 + } else if ins.OpCode.IsDWordLoad() { + // If the kfunc DWordLoad is weak and not found, set its address to 0. + ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue + } + // Error on non-weak kfunc not found. + if errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) + } + if err != nil { + return nil, err + } + + idx, err := fdArray.add(module) + if err != nil { + return nil, err + } + + if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil { + return nil, &incompatibleKfuncError{kfm.Func.Name, err} + } + + id, err := spec.TypeID(target) + if err != nil { + return nil, err + } + + ins.Constant = int64(id) + ins.Offset = int16(idx) + + if !iter.Next() { + break + } + } + + return fdArray, nil +} + +type incompatibleKfuncError struct { + name string + err error +} + +func (ike *incompatibleKfuncError) Error() string { + return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err) +} + +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. 
+func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } + + // Kernel supports bpf_probe_read_kernel, nothing to do. + if haveProbeReadKernel() == nil { + return + } + + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} + +// resolveKconfigReferences creates and populates a .kconfig map if necessary. +// +// Returns a nil Map and no error if no references exist. +func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + var spec *MapSpec + iter := insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta != nil { + spec = meta.Map + break + } + } + + if spec == nil { + return nil, nil + } + + cpy := spec.Copy() + if err := resolveKconfig(cpy); err != nil { + return nil, err + } + + kconfig, err := NewMap(cpy) + if err != nil { + return nil, err + } + defer closeOnError(kconfig) + + // Resolve all instructions which load from .kconfig map with actual map + // and offset inside it. + iter = insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta == nil { + continue + } + + if meta.Map != spec { + return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index) + } + + if err := iter.Ins.AssociateMap(kconfig); err != nil { + return nil, fmt.Errorf("instruction %d: %w", iter.Index, err) + } + + // Encode a map read at the offset of the var in the datasec. 
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32) + iter.Ins.Metadata.Set(kconfigMetaKey{}, nil) + } + + return kconfig, nil +} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 0000000000..0b62101c3c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,1669 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + "time" + "unsafe" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// Errors returned by Map and MapIterator methods. +var ( + ErrKeyNotExist = errors.New("key does not exist") + ErrKeyExist = errors.New("key already exists") + ErrIterationAborted = errors.New("iteration aborted") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") + + // pre-allocating these errors here since they may get called in hot code paths + // and cause unnecessary memory allocations + errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist) +) + +// MapOptions control loading a map into the kernel. +type MapOptions struct { + // The base path to pin maps in if requested via PinByName. + // Existing maps will be re-used if they are compatible, otherwise an + // error is returned. + PinPath string + LoadPinOptions LoadPinOptions +} + +// MapID represents the unique ID of an eBPF map +type MapID uint32 + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + + // Flags is passed to the kernel and specifies additional map + // creation attributes. 
+ Flags uint32 + + // Automatically pin and load a map from MapOptions.PinPath. + // Generates an error if an existing pinned map is incompatible with the MapSpec. + Pinning PinType + + // Specify numa node during map creation + // (effective only if unix.BPF_F_NUMA_NODE flag is set, + // which can be imported from golang.org/x/sys/unix) + NumaNode uint32 + + // The initial contents of the map. May be nil. + Contents []MapKV + + // Whether to freeze a map after setting its initial contents. + Freeze bool + + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec + + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +// +// MapSpec.Contents is a shallow copy. +func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + cpy.Contents = slices.Clone(cpy.Contents) + cpy.Key = btf.Copy(cpy.Key) + cpy.Value = btf.Copy(cpy.Value) + + if cpy.InnerMap == ms { + cpy.InnerMap = &cpy + } else { + cpy.InnerMap = ms.InnerMap.Copy() + } + + if cpy.Extra != nil { + extra := *cpy.Extra + cpy.Extra = &extra + } + + return &cpy +} + +// fixupMagicFields fills fields of MapSpec which are usually +// left empty in ELF or which depend on runtime information. +// +// The method doesn't modify Spec, instead returning a copy. +// The copy is only performed if fixups are necessary, so callers mustn't mutate +// the returned spec. 
+func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { + switch spec.Type { + case ArrayOfMaps, HashOfMaps: + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for map of map") + } + + spec = spec.Copy() + spec.ValueSize = 4 + + case PerfEventArray: + if spec.KeySize != 0 && spec.KeySize != 4 { + return nil, errors.New("KeySize must be zero or four for perf event array") + } + + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for perf event array") + } + + spec = spec.Copy() + spec.KeySize = 4 + spec.ValueSize = 4 + + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup perf event array: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // MaxEntries should be zero most of the time, but there is code + // out there which hardcodes large constants. Clamp the number + // of entries to the number of CPUs at most. Allow creating maps with + // less than n items since some kernel selftests relied on this + // behaviour in the past. + spec.MaxEntries = n + } + } + + return spec, nil +} + +// dataSection returns the contents and BTF Datasec descriptor of the spec. +func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + +// MapKV is used to initialize the contents of a Map. 
+type MapKV struct { + Key interface{} + Value interface{} +} + +// Compatible returns nil if an existing map may be used instead of creating +// one from the spec. +// +// Returns an error wrapping [ErrMapIncompatible] otherwise. +func (ms *MapSpec) Compatible(m *Map) error { + ms, err := ms.fixupMagicFields() + if err != nil { + return err + } + + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } + + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this + // mismatch. + if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) && + m.flags != ms.Flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags)) + } + + if len(diffs) == 0 { + return nil + } + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + name string + fd *sys.FD + typ MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + pinnedPath string + // Per CPU maps return values larger than the size in the spec + fullValueSize int +} + +// NewMapFromFD creates a map from a raw fd. 
+// +// You should not use fd after calling this function. +func NewMapFromFD(fd int) (*Map, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newMapFromFD(f) +} + +func newMapFromFD(fd *sys.FD) (*Map, error) { + info, err := newMapInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("get map info: %w", err) + } + + return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) +} + +// NewMap creates a new Map. +// +// It's equivalent to calling NewMapWithOptions with default options. +func NewMap(spec *MapSpec) (*Map, error) { + return NewMapWithOptions(spec, MapOptions{}) +} + +// NewMapWithOptions creates a new Map. +// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. +// +// The caller is responsible for ensuring the process' rlimit is set +// sufficiently high for locking memory during map creation. This can be done +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. +// +// May return an error wrapping ErrMapIncompatible. 
+func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { + m, err := newMapWithOptions(spec, opts) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + if err := m.finalize(spec); err != nil { + m.Close() + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil +} + +func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + switch spec.Pinning { + case PinByName: + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") + } + + path := filepath.Join(opts.PinPath, spec.Name) + m, err := LoadPinnedMap(path, &opts.LoadPinOptions) + if errors.Is(err, unix.ENOENT) { + break + } + if err != nil { + return nil, fmt.Errorf("load pinned map: %w", err) + } + defer closeOnError(m) + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) + } + + return m, nil + + case PinNone: + // Nothing to do here + + default: + return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) + } + + var innerFd *sys.FD + if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.InnerMap == nil { + return nil, fmt.Errorf("%s requires InnerMap", spec.Type) + } + + if spec.InnerMap.Pinning != PinNone { + return nil, errors.New("inner maps cannot be pinned") + } + + template, err := spec.InnerMap.createMap(nil, opts) + if err != nil { + return nil, fmt.Errorf("inner map: %w", err) + } + defer template.Close() + + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. 
+ + innerFd = template.fd + } + + m, err := spec.createMap(innerFd, opts) + if err != nil { + return nil, err + } + defer closeOnError(m) + + if spec.Pinning == PinByName { + path := filepath.Join(opts.PinPath, spec.Name) + if err := m.Pin(path); err != nil { + return nil, fmt.Errorf("pin map to %s: %w", path, err) + } + } + + return m, nil +} + +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) { + closeOnError := func(closer io.Closer) { + if err != nil { + closer.Close() + } + } + + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. + if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + } + + spec, err = spec.fixupMagicFields() + if err != nil { + return nil, err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), + KeySize: spec.KeySize, + ValueSize: spec.ValueSize, + MaxEntries: spec.MaxEntries, + MapFlags: sys.MapFlags(spec.Flags), + NumaNode: spec.NumaNode, + } + + if inner != nil { + attr.InnerMapFd = inner.Uint() + } + + if haveObjName() == nil { + attr.MapName = sys.NewObjName(spec.Name) + } + + if spec.Key != nil || spec.Value != nil { + handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + if handle != nil { + defer handle.Close() + + // Use BTF k/v during map creation. 
+ attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = keyTypeID + attr.BtfValueTypeId = valueTypeID + } + } + + fd, err := sys.MapCreate(&attr) + + // Some map types don't support BTF k/v in earlier kernel versions. + // Remove BTF metadata and retry map creation. + if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 { + attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0 + fd, err = sys.MapCreate(&attr) + } + if err != nil { + return nil, handleMapCreateError(attr, spec, err) + } + + defer closeOnError(fd) + m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + if err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + return m, nil +} + +func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if errors.Is(err, unix.EPERM) { + return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + if errors.Is(err, unix.EINVAL) && spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if errors.Is(err, unix.EINVAL) && spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type) + } + + switch spec.Type { + case ArrayOfMaps, HashOfMaps: + if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&unix.BPF_F_MMAPABLE > 0 { + if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } 
+ } + if spec.Flags&unix.BPF_F_INNER_MAP > 0 { + if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size. + if errors.Is(err, unix.EINVAL) && + (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) { + pageSize := uint32(os.Getpagesize()) + maxEntries := attr.MaxEntries + if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) { + return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize) + } + } + + return fmt.Errorf("map create: %w", err) +} + +// newMap allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. +func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { + m := &Map{ + name, + fd, + typ, + keySize, + valueSize, + maxEntries, + flags, + "", + int(valueSize), + } + + if !typ.hasPerCPUValue() { + return m, nil + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs + return m, nil +} + +func (m *Map) String() string { + if m.name != "" { + return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd) + } + return fmt.Sprintf("%s#%v", m.typ, m.fd) +} + +// Type returns the underlying type of the map. +func (m *Map) Type() MapType { + return m.typ +} + +// KeySize returns the size of the map key in bytes. +func (m *Map) KeySize() uint32 { + return m.keySize +} + +// ValueSize returns the size of the map value in bytes. +func (m *Map) ValueSize() uint32 { + return m.valueSize +} + +// MaxEntries returns the maximum number of elements the map can hold. 
+func (m *Map) MaxEntries() uint32 { + return m.maxEntries +} + +// Flags returns the flags of the map. +func (m *Map) Flags() uint32 { + return m.flags +} + +// Info returns metadata about the map. +func (m *Map) Info() (*MapInfo, error) { + return newMapInfoFromFd(m.fd) +} + +// Handle returns a reference to the Map's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the Map. +func (m *Map) Handle() (*btf.Handle, error) { + info, err := m.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = unix.BPF_F_LOCK + +// Lookup retrieves a value from a Map. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) Lookup(key, valueOut interface{}) error { + return m.LookupWithFlags(key, valueOut, 0) +} + +// LookupWithFlags retrieves a value from a Map with flags. +// +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. 
+func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupPerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil { + return err + } + + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.LookupAndDeleteWithFlags(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupAndDeletePerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil { + return err + } + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupBytes gets a value from Map. +// +// Returns a nil value if a key doesn't exist. 
+func (m *Map) LookupBytes(key interface{}) ([]byte, error) { + valueBytes := make([]byte, m.fullValueSize) + valuePtr := sys.NewSlicePointer(valueBytes) + + err := m.lookup(key, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return valueBytes, err +} + +func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + if errors.Is(err, unix.ENOENT) { + return errMapLookupKeyNotExist + } + return fmt.Errorf("lookup: %w", wrapMapError(err)) + } + return nil +} + +func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. 
+ return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil +} + +func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return nil +} + +// MapUpdateFlags controls the behaviour of the Map.Update call. +// +// The exact semantics depend on the specific MapType. +type MapUpdateFlags uint64 + +const ( + // UpdateAny creates a new element or update an existing one. + UpdateAny MapUpdateFlags = iota + // UpdateNoExist creates a new element. + UpdateNoExist MapUpdateFlags = 1 << (iota - 1) + // UpdateExist updates an existing element. + UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock +) + +// Put replaces or creates a value in map. +// +// It is equivalent to calling Update with UpdateAny. 
+func (m *Map) Put(key, value interface{}) error { + return m.Update(key, value, UpdateAny) +} + +// Update changes the value of a key. +func (m *Map) Update(key, value any, flags MapUpdateFlags) error { + if m.typ.hasPerCPUValue() { + return m.updatePerCPU(key, value, flags) + } + + valuePtr, err := m.marshalValue(value) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error { + valuePtr, err := marshalPerCPUValue(value, int(m.valueSize)) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("marshal key: %w", err) + } + + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) + } + + return nil +} + +// Delete removes a value. +// +// Returns ErrKeyNotExist if the key does not exist. +func (m *Map) Delete(key interface{}) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) + } + return nil +} + +// NextKey finds the key following an initial key. +// +// See NextKeyBytes for details. +// +// Returns ErrKeyNotExist if there is no next key. 
+func (m *Map) NextKey(key, nextKeyOut interface{}) error { + nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) + + if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil { + return err + } + + if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil { + return fmt.Errorf("can't unmarshal next key: %w", err) + } + return nil +} + +// NextKeyBytes returns the key following an initial key as a byte slice. +// +// Passing nil will return the first key. +// +// Use Iterate if you want to traverse all entries in the map. +// +// Returns nil if there are no more keys. +func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { + nextKey := make([]byte, m.keySize) + nextKeyPtr := sys.NewSlicePointer(nextKey) + + err := m.nextKey(key, nextKeyPtr) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return nextKey, err +} + +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { + var ( + keyPtr sys.Pointer + err error + ) + + if key != nil { + keyPtr, err = m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + } + + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, + } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. 
+ attr.Key = sys.NewSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + + return nil +} + +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { + return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) +}) + +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Map a protected page and use that as the value pointer. This saves some + // work copying out the value, which we're not interested in. + page, err := mmapProtectedPage() + if err != nil { + return nil, err + } + valuePtr := sys.NewSlicePointer(page) + + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. 
+ case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return randKey, nil + } + } + + return nil, errors.New("couldn't find non-existing key") +} + +// BatchLookup looks up many elements in a map at once. +// +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. +// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup: %w", err) + } + return n, nil +} + +// BatchLookupAndDelete looks up many elements in a map at once, +// +// It then deletes all those elements. +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. 
+// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup and delete: %w", err) + } + return n, nil +} + +// MapBatchCursor represents a starting point for a batch operation. +type MapBatchCursor struct { + m *Map + opaque []byte +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts) + } + + count, err := batchCount(keysOut, valuesOut) + if err != nil { + return 0, err + } + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if errors.Is(err, unix.ENOSPC) { + // Hash tables return ENOSPC when the size of the batch is smaller than + // any bucket. 
+		return n, fmt.Errorf("%w (batch size too small?)", err)
+	} else if err != nil {
+		return n, err
+	}
+
+	err = valueBuf.Unmarshal(valuesOut)
+	if err != nil {
+		return 0, err
+	}
+
+	return n, nil
+}
+
+// batchLookupPerCPU is the per-CPU variant of batchLookup. It performs the
+// batch syscall into a raw scratch buffer and then decodes one value per
+// possible CPU for every looked-up key into valuesOut.
+func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	count, err := sliceLen(keysOut)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
+	}
+
+	valueBuf := make([]byte, count*int(m.fullValueSize))
+	valuePtr := sys.NewSlicePointer(valueBuf)
+
+	n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts)
+	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+		// Return sysErr, not err: err still holds the (nil) result of
+		// sliceLen above, so returning it would silently drop the
+		// syscall failure.
+		return 0, sysErr
+	}
+
+	err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf)
+	if err != nil {
+		return 0, err
+	}
+
+	// sysErr may be a wrapped ENOENT here, which tells the caller that the
+	// batch iteration has reached the end of the map.
+	return n, sysErr
+}
+
+// batchLookupCmd issues a single batch lookup (or lookup-and-delete) syscall,
+// managing the opaque iteration cursor and unmarshaling the returned keys
+// into keysOut.
+func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+	cursorLen := int(m.keySize)
+	if cursorLen < 4 {
+		// * generic_map_lookup_batch requires that batch_out is key_size bytes.
+		//   This is used by array and LPM maps.
+		//
+		// * __htab_map_lookup_and_delete_batch requires u32. This is used by the
+		//   various hash maps.
+		//
+		// Use a minimum of 4 bytes to avoid having to distinguish between the two.
+		cursorLen = 4
+	}
+
+	inBatch := cursor.opaque
+	if inBatch == nil {
+		// This is the first lookup, allocate a buffer to hold the cursor.
+		cursor.opaque = make([]byte, cursorLen)
+		cursor.m = m
+	} else if cursor.m != m {
+		// Prevent reuse of a cursor across maps. First, it's unlikely to work.
+		// Second, the maps may require different cursorLen and cursor.opaque
+		// may therefore be too short. This could lead to the kernel clobbering
+		// user space memory.
+ return 0, errors.New("a cursor may not be reused across maps") + } + + if err := haveBatchAPI(); err != nil { + return 0, err + } + + keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize)) + + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyBuf.Pointer(), + Values: valuePtr, + Count: uint32(count), + InBatch: sys.NewSlicePointer(inBatch), + OutBatch: sys.NewSlicePointer(cursor.opaque), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + if err := keyBuf.Unmarshal(keysOut); err != nil { + return 0, err + } + + return int(attr.Count), sysErr +} + +// BatchUpdate updates the map with multiple keys and values +// simultaneously. +// "keys" and "values" must be of type slice, a pointer +// to a slice or buffer will not work. +func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchUpdatePerCPU(keys, values, opts) + } + + count, err := batchCount(keys, values) + if err != nil { + return 0, err + } + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, err + } + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), 
fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts) +} + +// BatchDelete batch deletes entries in the map by keys. +// "keys" must be of type slice, a pointer to a slice or buffer will not work. +func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, fmt.Errorf("cannot marshal keys: %v", err) + } + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + +// Iterate traverses a map. +// +// It's safe to create multiple iterators at the same time. +// +// It's not possible to guarantee that all keys in a map will be +// returned if there are concurrent modifications to the map. 
+func (m *Map) Iterate() *MapIterator { + return newMapIterator(m) +} + +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. +func (m *Map) Close() error { + if m == nil { + // This makes it easier to clean up when iterating maps + // of maps / programs. + return nil + } + + return m.fd.Close() +} + +// FD gets the file descriptor of the Map. +// +// Calling this function is invalid after Close has been called. +func (m *Map) FD() int { + return m.fd.Int() +} + +// Clone creates a duplicate of the Map. +// +// Closing the duplicate does not affect the original, and vice versa. +// Changes made to the map are reflected by both instances however. +// If the original map was pinned, the cloned map will not be pinned by default. +// +// Cloning a nil Map returns nil. +func (m *Map) Clone() (*Map, error) { + if m == nil { + return nil, nil + } + + dup, err := m.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone map: %w", err) + } + + return &Map{ + m.name, + dup, + m.typ, + m.keySize, + m.valueSize, + m.maxEntries, + m.flags, + "", + m.fullValueSize, + }, nil +} + +// Pin persists the map on the BPF virtual file system past the lifetime of +// the process that created it . +// +// Calling Pin on a previously pinned map will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// You can Clone a map to pin it to a different path. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (m *Map) Pin(fileName string) error { + if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil { + return err + } + m.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the map from the BPF virtual filesystem. 
+// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Map returns nil. +func (m *Map) Unpin() error { + if err := internal.Unpin(m.pinnedPath); err != nil { + return err + } + m.pinnedPath = "" + return nil +} + +// IsPinned returns true if the map has a non-empty pinned path. +func (m *Map) IsPinned() bool { + return m.pinnedPath != "" +} + +// Freeze prevents a map to be modified from user space. +// +// It makes no changes to kernel-side restrictions. +func (m *Map) Freeze() error { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("can't freeze map: %w", haveFeatErr) + } + return fmt.Errorf("can't freeze map: %w", err) + } + return nil +} + +// finalize populates the Map according to the Contents specified +// in spec and freezes the Map if requested by spec. +func (m *Map) finalize(spec *MapSpec) error { + for _, kv := range spec.Contents { + if err := m.Put(kv.Key, kv.Value); err != nil { + return fmt.Errorf("putting value: key %v: %w", kv.Key, err) + } + } + + if spec.Freeze { + if err := m.Freeze(); err != nil { + return fmt.Errorf("freezing map: %w", err) + } + } + + return nil +} + +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { + if data == nil { + if m.keySize == 0 { + // Queues have a key length of zero, so passing nil here is valid. 
+ return sys.NewPointer(nil), nil + } + return sys.Pointer{}, errors.New("can't use nil as key of map") + } + + return marshalMapSyscallInput(data, int(m.keySize)) +} + +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { + var ( + buf []byte + err error + ) + + switch value := data.(type) { + case *Map: + if !m.typ.canStoreMap() { + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + } + buf, err = marshalMap(value, int(m.valueSize)) + + case *Program: + if !m.typ.canStoreProgram() { + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + } + buf, err = marshalProgram(value, int(m.valueSize)) + + default: + return marshalMapSyscallInput(data, int(m.valueSize)) + } + + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { + switch value := value.(type) { + case **Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + + other, err := unmarshalMap(buf) + if err != nil { + return err + } + + // The caller might close the map externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + return errors.New("require pointer to *Map") + + case **Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + + other, err := unmarshalProgram(buf) + if err != nil { + return err + } + + // The caller might close the program externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + return errors.New("require pointer to *Program") + } + + return buf.Unmarshal(value) +} + +// LoadPinnedMap loads a Map from a BPF file. 
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + m, err := newMapFromFD(fd) + if err == nil { + m.pinnedPath = fileName + } + + return m, err +} + +// unmarshalMap creates a map from a map ID encoded in host endianness. +func unmarshalMap(buf sysenc.Buffer) (*Map, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + return NewMapFromID(MapID(id)) +} + +// marshalMap marshals the fd of a map into a buffer in host endianness. +func marshalMap(m *Map, length int) ([]byte, error) { + if length != 4 { + return nil, fmt.Errorf("can't marshal map to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) + return buf, nil +} + +// MapIterator iterates a Map. +// +// See Map.Iterate. +type MapIterator struct { + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any + count, maxEntries uint32 + done bool + err error +} + +func newMapIterator(target *Map) *MapIterator { + return &MapIterator{ + target: target, + maxEntries: target.maxEntries, + } +} + +// Next decodes the next key and value. +// +// Iterating a hash map from which keys are being deleted is not +// safe. You may see the same key multiple times. Iteration may +// also abort with an error, see IsIterationAborted. +// +// Returns false if there are no more entries. You must check +// the result of Err afterwards. +// +// See Map.Get for further caveats around valueOut. +func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { + if mi.err != nil || mi.done { + return false + } + + // For array-like maps NextKey returns nil only after maxEntries + // iterations. 
+ for mi.count <= mi.maxEntries { + if mi.cursor == nil { + // Pass nil interface to NextKey to make sure the Map's first key + // is returned. If we pass an uninitialized []byte instead, it'll see a + // non-nil interface and try to marshal it. + mi.cursor = make([]byte, mi.target.keySize) + mi.err = mi.target.NextKey(nil, mi.cursor) + } else { + mi.err = mi.target.NextKey(mi.cursor, mi.cursor) + } + + if errors.Is(mi.err, ErrKeyNotExist) { + mi.done = true + mi.err = nil + return false + } else if mi.err != nil { + mi.err = fmt.Errorf("get next key: %w", mi.err) + return false + } + + mi.count++ + mi.err = mi.target.Lookup(mi.cursor, valueOut) + if errors.Is(mi.err, ErrKeyNotExist) { + // Even though the key should be valid, we couldn't look up + // its value. If we're iterating a hash map this is probably + // because a concurrent delete removed the value before we + // could get it. This means that the next call to NextKeyBytes + // is very likely to restart iteration. + // If we're iterating one of the fd maps like + // ProgramArray it means that a given slot doesn't have + // a valid fd associated. It's OK to continue to the next slot. + continue + } + if mi.err != nil { + mi.err = fmt.Errorf("look up next key: %w", mi.err) + return false + } + + buf := mi.cursor.([]byte) + if ptr, ok := keyOut.(unsafe.Pointer); ok { + copy(unsafe.Slice((*byte)(ptr), len(buf)), buf) + } else { + mi.err = sysenc.Unmarshal(keyOut, buf) + } + + return mi.err == nil + } + + mi.err = fmt.Errorf("%w", ErrIterationAborted) + return false +} + +// Err returns any encountered error. +// +// The method must be called after Next returns nil. +// +// Returns ErrIterationAborted if it wasn't possible to do a full iteration. +func (mi *MapIterator) Err() error { + return mi.err +} + +// MapGetNextID returns the ID of the next eBPF map. +// +// Returns ErrNotExist, if there is no next eBPF map. 
+func MapGetNextID(startID MapID) (MapID, error) { + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) +} + +// NewMapFromID returns the map for a given id. +// +// Returns ErrNotExist, if there is no eBPF map with the given id. +func NewMapFromID(id MapID) (*Map, error) { + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, err + } + + return newMapFromFD(fd) +} + +// sliceLen returns the length if the value is a slice or an error otherwise. +func sliceLen(slice any) (int, error) { + sliceValue := reflect.ValueOf(slice) + if sliceValue.Kind() != reflect.Slice { + return 0, fmt.Errorf("%T is not a slice", slice) + } + return sliceValue.Len(), nil +} diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go new file mode 100644 index 0000000000..57a0a8e88a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -0,0 +1,210 @@ +package ebpf + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "slices" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" +) + +// marshalMapSyscallInput converts an arbitrary value into a pointer suitable +// to be passed to the kernel. +// +// As an optimization, it returns the original value if it is an +// unsafe.Pointer. 
+func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { + if ptr, ok := data.(unsafe.Pointer); ok { + return sys.NewPointer(ptr), nil + } + + buf, err := sysenc.Marshal(data, length) + if err != nil { + return sys.Pointer{}, err + } + + return buf.Pointer(), nil +} + +func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { + if ptr, ok := dst.(unsafe.Pointer); ok { + return sysenc.UnsafeBuffer(ptr) + } + + _, ok := dst.(encoding.BinaryUnmarshaler) + if ok { + return sysenc.SyscallOutput(nil, length) + } + + return sysenc.SyscallOutput(dst, length) +} + +// appendPerCPUSlice encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, errors.New("per-CPU value requires slice") + } + + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") + } + + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := sysenc.Marshal(elem, elemLength) + if err != nil { + return nil, err + } + + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) + } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. 
+func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err + } + + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. +func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") + } + sliceValue := reflect.ValueOf(slice) + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return nil, fmt.Errorf("batch %d: %w", i, err) + } + } + return buf, nil +} + +// unmarshalPerCPUValue decodes a buffer into a slice containing one value per +// possible CPU. +// +// slice must be a literal slice and not a pointer. 
+func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + stride := internal.Align(elemLength, 8) + for i := 0; i < possibleCPUs; i++ { + var elem any + v := sliceValue.Index(i) + if sliceElemIsPointer { + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() + } else { + elem = v.Addr().Interface() + } + err := sysenc.Unmarshal(elem, buf[:elemLength]) + if err != nil { + return fmt.Errorf("cpu %d: %w", i, err) + } + + buf = buf[stride:] + } + return nil +} + +// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). 
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return fmt.Errorf("batch requires a slice")
+	}
+
+	sliceValue := reflect.ValueOf(slice)
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return err
+	}
+	if sliceValue.Len() != batchLen*possibleCPUs {
+		// Argument order matches the "expected %d, got %d" wording:
+		// expected length first, actual length second.
+		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			batchLen*possibleCPUs, sliceValue.Len())
+	}
+
+	// Each per-CPU value occupies possibleCPUs elements, padded to 8 bytes.
+	fullValueSize := possibleCPUs * internal.Align(elemLength, 8)
+	if len(buf) != batchLen*fullValueSize {
+		return fmt.Errorf("input buffer has incorrect length, expected %d, got %d",
+			batchLen*fullValueSize, len(buf))
+	}
+
+	for i := 0; i < batchLen; i++ {
+		// Decode the i-th batch entry into its possibleCPUs-long sub-slice.
+		elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+		if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil {
+			return fmt.Errorf("batch %d: %w", i, err)
+		}
+		buf = buf[fullValueSize:]
+	}
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml
new file mode 100644
index 0000000000..67c83f3b30
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/netlify.toml
@@ -0,0 +1,4 @@
+[build]
+  base = "docs/"
+  publish = "site/"
+  command = "mkdocs build"
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
new file mode 100644
index 0000000000..9bc6325f88
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -0,0 +1,1141 @@
+package ebpf
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+	"unsafe"
+
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/btf"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/kallsyms"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/sysenc"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// ErrNotSupported is returned 
whenever the kernel doesn't support a feature. +var ErrNotSupported = internal.ErrNotSupported + +// errBadRelocation is returned when the verifier rejects a program due to a +// bad CO-RE relocation. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errBadRelocation = errors.New("bad CO-RE relocation") + +// errUnknownKfunc is returned when the verifier rejects a program due to an +// unknown kfunc. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errUnknownKfunc = errors.New("unknown kfunc") + +// ProgramID represents the unique ID of an eBPF program. +type ProgramID uint32 + +const ( + // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. + // This is currently the maximum of spare space allocated for SKB + // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. + outputPad = 256 + 2 +) + +// Deprecated: the correct log size is now detected automatically and this +// constant is unused. +const DefaultVerifierLogSize = 64 * 1024 + +// minVerifierLogSize is the default number of bytes allocated for the +// verifier log. +const minVerifierLogSize = 64 * 1024 + +// ProgramOptions control loading a program into the kernel. +type ProgramOptions struct { + // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. + // LogLevel-type values can be ORed together to request specific kinds of + // verifier output. See the documentation on [ebpf.LogLevel] for details. + // + // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats) + // + // If left to its default value, the program will first be loaded without + // verifier output enabled. Upon error, the program load will be repeated + // with LogLevelBranch and the given (or default) LogSize value. 
+ // + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier + // log, populating the [ebpf.Program.VerifierLog] field on successful loads + // and including detailed verifier errors if the program is rejected. This + // will always allocate an output buffer, but will result in only a single + // attempt at loading the program. + LogLevel LogLevel + + // Deprecated: the correct log buffer size is determined automatically + // and this field is ignored. + LogSize int + + // Disables the verifier log completely, regardless of other options. + LogDisabled bool + + // Type information used for CO-RE relocations. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec + + // Type information used for CO-RE relocations of kernel modules, + // indexed by module name. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel module BTF from a well-known location if nil. + KernelModuleTypes map[string]*btf.Spec +} + +// ProgramSpec defines a Program. +type ProgramSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + + // Type determines at which hook in the kernel a program will run. + Type ProgramType + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. + AttachType AttachType + + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + + // The program to attach to. Must be provided manually. 
+ AttachTarget *Program + + // The name of the ELF section this program originated from. + SectionName string + + Instructions asm.Instructions + + // Flags is passed to the kernel and specifies additional program + // load attributes. + Flags uint32 + + // License of the program. Some helpers are only available if + // the license is deemed compatible with the GPL. + // + // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 + License string + + // Version used by Kprobe programs. + // + // Deprecated on kernels 5.0 and later. Leave empty to let the library + // detect this value automatically. + KernelVersion uint32 + + // The byte order this program was compiled for, may be nil. + ByteOrder binary.ByteOrder +} + +// Copy returns a copy of the spec. +func (ps *ProgramSpec) Copy() *ProgramSpec { + if ps == nil { + return nil + } + + cpy := *ps + cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) + copy(cpy.Instructions, ps.Instructions) + return &cpy +} + +// Tag calculates the kernel tag for a series of instructions. +// +// Use asm.Instructions.Tag if you need to calculate for non-native endianness. +func (ps *ProgramSpec) Tag() (string, error) { + return ps.Instructions.Tag(internal.NativeEndian) +} + +// KernelModule returns the kernel module, if any, the AttachTo function is contained in. +func (ps *ProgramSpec) KernelModule() (string, error) { + if ps.AttachTo == "" { + return "", nil + } + + switch ps.Type { + default: + return "", nil + case Tracing: + switch ps.AttachType { + default: + return "", nil + case AttachTraceFEntry: + case AttachTraceFExit: + } + fallthrough + case Kprobe: + return kallsyms.KernelModule(ps.AttachTo) + } +} + +// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a +// program is rejected by the verifier. +// +// Use [errors.As] to access the error. +type VerifierError = internal.VerifierError + +// Program represents BPF program loaded into the kernel. 
+// +// It is not safe to close a Program which is used by other goroutines. +type Program struct { + // Contains the output of the kernel verifier if enabled, + // otherwise it is empty. + VerifierLog string + + fd *sys.FD + name string + pinnedPath string + typ ProgramType +} + +// NewProgram creates a new Program. +// +// See [NewProgramWithOptions] for details. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. +func NewProgram(spec *ProgramSpec) (*Program, error) { + return NewProgramWithOptions(spec, ProgramOptions{}) +} + +// NewProgramWithOptions creates a new Program. +// +// Loading a program for the first time will perform +// feature detection by loading small, temporary programs. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. +func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + + prog, err := newProgramWithOptions(spec, opts) + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { + return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) + } + return prog, err +} + +var ( + coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel)) + // This log message was introduced by ebb676daa1a3 ("bpf: Print function name in + // addition to function id") which first appeared in v4.10 and has remained + // unchanged since. 
+ coreBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel)) + kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase)) +) + +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if len(spec.Instructions) == 0 { + return nil, errors.New("instructions cannot be empty") + } + + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") + } + + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) + } + + // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") + // require the version field to be set to the value of the KERNEL_VERSION + // macro for kprobe-type programs. + // Overwrite Kprobe program version if set to zero or the magic version constant. + kv := spec.KernelVersion + if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { + v, err := internal.KernelVersion() + if err != nil { + return nil, fmt.Errorf("detecting kernel version: %w", err) + } + kv = v.Kernel() + } + + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, + } + + if haveObjName() == nil { + attr.ProgName = sys.NewObjName(spec.Name) + } + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) + + kmodName, err := spec.KernelModule() + if err != nil { + return nil, fmt.Errorf("kernel module search: %w", err) + } + + var targets []*btf.Spec + if opts.KernelTypes != nil { + targets = append(targets, opts.KernelTypes) + } + if kmodName != "" && opts.KernelModuleTypes != nil { + if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok { + targets = append(targets, modBTF) + } + } + + var b btf.Builder + if err 
:= applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) + } + + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos + } + + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. + fib, lib, err := btf.MarshalExtInfos(insns, &b) + if err != nil { + return nil, fmt.Errorf("marshal ext_infos: %w", err) + } + + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.NewSlicePointer(fib) + + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.NewSlicePointer(lib) + } + + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { + return nil, fmt.Errorf("load BTF: %w", err) + } + defer handle.Close() + + attr.ProgBtfFd = uint32(handle.FD()) + } + + kconfig, err := resolveKconfigReferences(insns) + if err != nil { + return nil, fmt.Errorf("resolve .kconfig: %w", err) + } + defer kconfig.Close() + + if err := fixupAndValidate(insns); err != nil { + return nil, err + } + + handles, err := fixupKfuncs(insns) + if err != nil { + return nil, fmt.Errorf("fixing up kfuncs: %w", err) + } + defer handles.Close() + + if len(handles) > 0 { + fdArray := handles.fdArray() + attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0])) + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err = insns.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + attr.Insns = sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) + + if 
spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil { + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + if module != nil { + attr.AttachBtfObjFd = uint32(module.FD()) + defer module.Close() + } + } + + // The caller requested a specific verifier log level. Set up the log buffer + // so that there is a chance of loading the program in a single shot. + var logBuf []byte + if !opts.LogDisabled && opts.LogLevel != 0 { + logBuf = make([]byte, minVerifierLogSize) + attr.LogLevel = opts.LogLevel + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + for { + var fd *sys.FD + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } + + if opts.LogDisabled { + break + } + + if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Logging is enabled and the error is not ENOSPC, so we can infer + // that the log buffer is large enough. + break + } + + if attr.LogLevel == 0 { + // Logging is not enabled but loading the program failed. Enable + // basic logging. + attr.LogLevel = LogLevelBranch + } + + // Make an educated guess how large the buffer should be. 
Start + // at minVerifierLogSize and then double the size. + logSize := uint32(max(len(logBuf)*2, minVerifierLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.LogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.LogTrueSize + } + + logBuf = make([]byte, logSize) + attr.LogSize = logSize + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + end := bytes.IndexByte(logBuf, 0) + if end < 0 { + end = len(logBuf) + } + + tail := logBuf[max(end-256, 0):end] + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + + case errors.Is(err, unix.EINVAL): + if bytes.Contains(tail, coreBadCall) { + err = errBadRelocation + break + } else if bytes.Contains(tail, kfuncBadCall) { + err = errUnknownKfunc + break + } + + case errors.Is(err, unix.EACCES): + if bytes.Contains(tail, coreBadLoad) { + err = errBadRelocation + break + } + } + + // hasFunctionReferences may be expensive, so check it last. + if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && + hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } + + return nil, internal.ErrorWithLog("load program", err, logBuf) +} + +// NewProgramFromFD creates a program from a raw fd. +// +// You should not use fd after calling this function. +// +// Requires at least Linux 4.10. +func NewProgramFromFD(fd int) (*Program, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newProgramFromFD(f) +} + +// NewProgramFromID returns the program for a given id. 
+// +// Returns ErrNotExist, if there is no eBPF program with the given id. +func NewProgramFromID(id ProgramID) (*Program, error) { + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get program by id: %w", err) + } + + return newProgramFromFD(fd) +} + +func newProgramFromFD(fd *sys.FD) (*Program, error) { + info, err := newProgramInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("discover program type: %w", err) + } + + return &Program{"", fd, info.Name, "", info.Type}, nil +} + +func (p *Program) String() string { + if p.name != "" { + return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd) + } + return fmt.Sprintf("%s(%v)", p.typ, p.fd) +} + +// Type returns the underlying type of the program. +func (p *Program) Type() ProgramType { + return p.typ +} + +// Info returns metadata about the program. +// +// Requires at least 4.10. +func (p *Program) Info() (*ProgramInfo, error) { + return newProgramInfoFromFd(p.fd) +} + +// Handle returns a reference to the program's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + return p.fd.Int() +} + +// Clone creates a duplicate of the Program. +// +// Closing the duplicate does not affect the original, and vice versa. +// +// Cloning a nil Program returns nil. 
+func (p *Program) Clone() (*Program, error) { + if p == nil { + return nil, nil + } + + dup, err := p.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone program: %w", err) + } + + return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil +} + +// Pin persists the Program on the BPF virtual file system past the lifetime of +// the process that created it +// +// Calling Pin on a previously pinned program will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (p *Program) Pin(fileName string) error { + if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { + return err + } + p.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the Program from the BPF virtual filesystem. +// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Program returns nil. +func (p *Program) Unpin() error { + if err := internal.Unpin(p.pinnedPath); err != nil { + return err + } + p.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Program has a non-empty pinned path. +func (p *Program) IsPinned() bool { + return p.pinnedPath != "" +} + +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. +func (p *Program) Close() error { + if p == nil { + return nil + } + + return p.fd.Close() +} + +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + // + // The kernel expects at least 14 bytes input for an ethernet header for + // XDP and SKB programs. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. 
Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Minimum number of times to run Program. Optional field. Defaults to 1. + // + // The program may be executed more often than this due to interruptions, e.g. + // when runtime.AllThreadsSyscall is invoked. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + // + // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead. + Reset func() +} + +// Test runs the Program in the kernel with the given input and returns the +// value returned by the eBPF program. +// +// Note: the kernel expects at least 14 bytes input for an ethernet header for +// XDP and SKB programs. +// +// This function requires at least Linux 4.12. +func (p *Program) Test(in []byte) (uint32, []byte, error) { + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.run(&opts) + if err != nil { + return ret, nil, fmt.Errorf("test program: %w", err) + } + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. 
+func (p *Program) Run(opts *RunOptions) (uint32, error) { + ret, _, err := p.run(opts) + if err != nil { + return ret, fmt.Errorf("run program: %w", err) + } + return ret, nil +} + +// Benchmark runs the Program with the given input for a number of times +// and returns the time taken per iteration. +// +// Returns the result of the last execution of the program and the time per +// run or an error. reset is called whenever the benchmark syscall is +// interrupted, and should be set to testing.B.ResetTimer or similar. +// +// This function requires at least Linux 4.12. +func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.run(&opts) + if err != nil { + return ret, total, fmt.Errorf("benchmark program: %w", err) + } + return ret, total, nil +} + +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error { + prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. + Type: SocketFilter, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + // This may be because we lack sufficient permissions, etc. + return err + } + defer prog.Close() + + in := internal.EmptyBPFContext + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), + } + + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): + // Check for EINVAL specifically, rather than err != nil since we + // otherwise misdetect due to insufficient permissions. + return internal.ErrNotSupported + + case errors.Is(err, unix.EINTR): + // We know that PROG_TEST_RUN is supported if we get EINTR. 
+ return nil + + case errors.Is(err, sys.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil + } + + return err +}) + +func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") + } + + if err := haveProgRun(); err != nil { + return 0, 0, err + } + + var ctxBytes []byte + if opts.Context != nil { + ctx := new(bytes.Buffer) + if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + ctxBytes = ctx.Bytes() + } + + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) + } + + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.NewSlicePointer(opts.Data), + DataOut: sys.NewSlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.NewSlicePointer(ctxBytes), + CtxOut: sys.NewSlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, + } + +retry: + for { + err := sys.ProgRun(&attr) + if err == nil { + break retry + } + + if errors.Is(err, unix.EINTR) { + if attr.Repeat <= 1 { + // Older kernels check whether enough repetitions have been + // executed only after checking for pending signals. + // + // run signal? done? run ... + // + // As a result we can get EINTR for repeat==1 even though + // the program was run exactly once. Treat this as a + // successful run instead. + // + // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code") + // the conditions are reversed: + // run done? signal? ... 
+ break retry + } + + if opts.Reset != nil { + opts.Reset() + } + continue retry + } + + if errors.Is(err, sys.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, err + } + + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] + } + + if len(ctxOut) != 0 { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil +} + +func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + return NewProgramFromID(ProgramID(id)) +} + +func marshalProgram(p *Program, length int) ([]byte, error) { + if length != 4 { + return nil, fmt.Errorf("can't marshal program to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) + return buf, nil +} + +// LoadPinnedProgram loads a Program from a BPF file. +// +// Requires at least Linux 4.11. 
+func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + info, err := newProgramInfoFromFd(fd) + if err != nil { + _ = fd.Close() + return nil, fmt.Errorf("info for %s: %w", fileName, err) + } + + var progName string + if haveObjName() == nil { + progName = info.Name + } else { + progName = filepath.Base(fileName) + } + + return &Program{"", fd, progName, fileName, info.Type}, nil +} + +// SanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. Use this to automatically generate valid names for maps +// and programs at runtime. +// +// The set of allowed characters depends on the running kernel version. +// Dots are only allowed as of kernel 5.2. +func SanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + if invalidBPFObjNameChar(char) { + return replacement + } + return char + }, name) +} + +// ProgramGetNextID returns the ID of the next eBPF program. +// +// Returns ErrNotExist, if there is no next eBPF program. +func ProgramGetNextID(startID ProgramID) (ProgramID, error) { + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) +} + +// BindMap binds map to the program and is only released once program is released. +// +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. +func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), + } + + return sys.ProgBindMap(attr) +} + +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. 
+// +// name, progType and attachType determine which type we need to attach to. +// +// The attach target may be in a loaded kernel module. +// In that case the returned handle will be non-nil. +// The caller is responsible for closing the handle. +// +// Returns errUnrecognizedAttachType if the combination of progType and attachType +// is not recognised. +func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var ( + typeName, featureName string + target btf.Type + ) + + switch (match{progType, attachType}) { + case match{LSM, AttachLSMMac}: + typeName = "bpf_lsm_" + name + featureName = name + " LSM hook" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceIter}: + typeName = "bpf_iter_" + name + featureName = name + " iterator" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + target = (*btf.Typedef)(nil) + default: + return nil, 0, errUnrecognizedAttachType + } + + spec, err := btf.LoadKernelSpec() + if err != nil { + return nil, 0, fmt.Errorf("load kernel spec: %w", err) + } + + spec, module, err := findTargetInKernel(spec, typeName, &target) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, &internal.UnsupportedFeatureError{Name: featureName} + } + // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel + // symbols, we should explicitly refuse program loads. 
They will not reliably + // do what the caller intended. + if errors.Is(err, btf.ErrMultipleMatches) { + return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err) + } + if err != nil { + return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err) + } + + id, err := spec.TypeID(target) + if err != nil { + module.Close() + return nil, 0, err + } + + return module, id, nil +} + +// findTargetInKernel attempts to find a named type in the current kernel. +// +// target will point at the found type after a successful call. Searches both +// vmlinux and any loaded modules. +// +// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound +// if the type wasn't found at all. +func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + err := kernelSpec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + spec, module, err := findTargetInModule(kernelSpec, typeName, target) + if err != nil { + return nil, nil, fmt.Errorf("find target in modules: %w", err) + } + return spec, module, nil + } + if err != nil { + return nil, nil, fmt.Errorf("find target in vmlinux: %w", err) + } + return kernelSpec, nil, err +} + +// findTargetInModule attempts to find a named type in any loaded module. +// +// base must contain the kernel's types and is used to parse kmod BTF. Modules +// are searched in the order they were loaded. +// +// Returns btf.ErrNotFound if the target can't be found in any module. 
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + it := new(btf.HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err) + } + + if !info.IsModule() { + continue + } + + spec, err := it.Handle.Spec(base) + if err != nil { + return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err) + } + + err = spec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + continue + } + if err != nil { + return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err) + } + + return spec, it.Take(), nil + } + if err := it.Err(); err != nil { + return nil, nil, fmt.Errorf("iterate modules: %w", err) + } + + return nil, nil, btf.ErrNotFound +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}, + match{Tracing, AttachTraceFEntry}, + match{Tracing, AttachTraceFExit}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) + } + + return spec.TypeID(targetFunc) +} diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go new file mode 100644 index 0000000000..4aef7faebc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ 
-0,0 +1,337 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + "runtime" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + // pre-allocating these here since they may + // get called in hot code paths and cause + // unnecessary memory allocations + sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT) + sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST) + sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) +) + +// invalidBPFObjNameChar returns true if char may not appear in +// a BPF object name. +func invalidBPFObjNameChar(char rune) bool { + dotAllowed := objNameAllowsDot() == nil + + switch { + case char >= 'A' && char <= 'Z': + return false + case char >= 'a' && char <= 'z': + return false + case char >= '0' && char <= '9': + return false + case dotAllowed && char == '.': + return false + case char == '_': + return false + default: + return true + } +} + +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err + } + bytecode := buf.Bytes() + + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) +} + +var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error { + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + // Invalid file descriptor. 
+ InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error { + // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since + // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_RDONLY_PROG, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error { + // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_MMAPABLE, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error { + // This checks BPF_F_INNER_MAP, which appeared in 5.10. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_INNER_MAP, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. 
+ m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +func wrapMapError(err error) error { + if err == nil { + return nil + } + + if errors.Is(err, unix.ENOENT) { + return sysErrKeyNotExist + } + + if errors.Is(err, unix.EEXIST) { + return sysErrKeyExist + } + + if errors.Is(err, sys.ENOTSUPP) { + return sysErrNotSupported + } + + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) + } + + return err +} + +var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error { + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName("feature_test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}) + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error { + if err := haveObjName(); err != nil { + return err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName(".test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}) + +var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error { + var maxEntries uint32 = 2 + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: maxEntries, + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + defer fd.Close() + + keys := []uint32{1, 2} + values := []uint32{3, 4} + kp, _ := marshalMapSyscallInput(keys, 8) + vp, _ := marshalMapSyscallInput(values, 8) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + 
Keys: kp, + Values: vp, + Count: maxEntries, + }) + if err != nil { + return internal.ErrNotSupported + } + return nil +}) + +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), + } + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) + +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) + +var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error { + prefix := internal.PlatformPrefix() + if prefix == "" { + return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Symbol: prefix + "sys_bpf", + Pid: -1, + } + + var err error + args.Group, err = tracefs.RandomGroup("ebpf_probe") + if err != nil { + return err + } + + evt, err := tracefs.NewEvent(args) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + return evt.Close() +}) + +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error { + insns := asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return err + } + bytecode := buf.Bytes() + + _, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(SocketFilter), + 
License: sys.NewStringPointer("MIT"), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + FuncInfoCnt: 1, + ProgBtfFd: math.MaxUint32, + }) + + if errors.Is(err, unix.EBADF) { + return nil + } + + if errors.Is(err, unix.E2BIG) { + return ErrNotSupported + } + + return err +}) diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go new file mode 100644 index 0000000000..542c2397ca --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types.go @@ -0,0 +1,299 @@ +package ebpf + +import ( + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType + +// MapType indicates the type map structure +// that will be initialized in the kernel. +type MapType uint32 + +// All the various map types that can be created +const ( + UnspecifiedMap MapType = iota + // Hash is a hash map + Hash + // Array is an array map + Array + // ProgramArray - A program array map is a special kind of array map whose map + // values contain only file descriptors referring to other eBPF + // programs. Thus, both the key_size and value_size must be + // exactly four bytes. This map is used in conjunction with the + // TailCall helper. + ProgramArray + // PerfEventArray - A perf event array is used in conjunction with PerfEventRead + // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers. + PerfEventArray + // PerCPUHash - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. 
+ PerCPUHash + // PerCPUArray - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + // Each CPU gets a copy of this hash, the contents of all of which can be reconciled + // later. + PerCPUArray + // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with + // GetStackID + StackTrace + // CGroupArray - This is a very niche structure used to help SKBInCGroup determine + // if an skb is from a socket belonging to a specific cgroup + CGroupArray + // LRUHash - This allows you to create a small hash structure that will purge the + // least recently used items rather than throw an error when you run out of memory + LRUHash + // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs, + // it has more to do with including the CPU id with the LRU calculation so that if a + // particular CPU is using a value over-and-over again, then it will be saved, but if + // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically + // giving weight to CPU locality over overall usage. + LRUCPUHash + // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful, + // for storing things like IP addresses which can be bit masked allowing for keys of differing + // values to refer to the same reference based on their masks. See wikipedia for more details. + LPMTrie + // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps + // itself. + ArrayOfMaps + // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps + // itself. + HashOfMaps + // DevMap - Specialized map to store references to network devices. + DevMap + // SockMap - Specialized map to store references to sockets. 
+ SockMap + // CPUMap - Specialized map to store references to CPUs. + CPUMap + // XSKMap - Specialized map for XDP programs to store references to open sockets. + XSKMap + // SockHash - Specialized hash to store references to sockets. + SockHash + // CGroupStorage - Special map for CGroups. + CGroupStorage + // ReusePortSockArray - Specialized map to store references to sockets that can be reused. + ReusePortSockArray + // PerCPUCGroupStorage - Special per CPU map for CGroups. + PerCPUCGroupStorage + // Queue - FIFO storage for BPF programs. + Queue + // Stack - LIFO storage for BPF programs. + Stack + // SkStorage - Specialized map for local storage at SK for BPF programs. + SkStorage + // DevMapHash - Hash-based indexing scheme for references to network devices. + DevMapHash + // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF + // program. + StructOpsMap + // RingBuf - Similar to PerfEventArray, but shared across all CPUs. + RingBuf + // InodeStorage - Specialized local storage map for inodes. + InodeStorage + // TaskStorage - Specialized local storage map for task_struct. + TaskStorage +) + +// hasPerCPUValue returns true if the Map stores a value per CPU. +func (mt MapType) hasPerCPUValue() bool { + return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage +} + +// canStoreMapOrProgram returns true if the Map stores references to another Map +// or Program. +func (mt MapType) canStoreMapOrProgram() bool { + return mt.canStoreMap() || mt.canStoreProgram() +} + +// canStoreMap returns true if the map type accepts a map fd +// for update and returns a map id for lookup. +func (mt MapType) canStoreMap() bool { + return mt == ArrayOfMaps || mt == HashOfMaps +} + +// canStoreProgram returns true if the map type accepts a program fd +// for update and returns a program id for lookup. 
+func (mt MapType) canStoreProgram() bool { + return mt == ProgramArray +} + +// ProgramType of the eBPF program +type ProgramType uint32 + +// eBPF program types +const ( + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = 
ProgramType(sys.BPF_PROG_TYPE_NETFILTER) +) + +// AttachType of the eBPF program, needed to differentiate allowed context accesses in +// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. +// Will cause invalid argument (EINVAL) at program load time if set incorrectly. +type AttachType uint32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type AttachType -trimprefix Attach + +// AttachNone is an alias for AttachCGroupInetIngress for readability reasons. +const AttachNone AttachType = 0 + +const ( + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = 
AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) + AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT) + AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG) + AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG) + AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME) + AttachCgroupUnixGetsockname = 
AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME) + AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY) + AttachNetkitPeer = AttachType(sys.BPF_NETKIT_PEER) +) + +// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command +type AttachFlags uint32 + +// PinType determines whether a map is pinned into a BPFFS. +type PinType uint32 + +// Valid pin types. +// +// Mirrors enum libbpf_pin_type. +const ( + PinNone PinType = iota + // Pin an object by using its name as the filename. + PinByName +) + +// LoadPinOptions control how a pinned object is loaded. +type LoadPinOptions struct { + // Request a read-only or write-only object. The default is a read-write + // object. Only one of the flags may be set. + ReadOnly bool + WriteOnly bool + + // Raw flags for the syscall. Other fields of this struct take precedence. + Flags uint32 +} + +// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. +func (lpo *LoadPinOptions) Marshal() uint32 { + if lpo == nil { + return 0 + } + + flags := lpo.Flags + if lpo.ReadOnly { + flags |= unix.BPF_F_RDONLY + } + if lpo.WriteOnly { + flags |= unix.BPF_F_WRONLY + } + return flags +} + +// BatchOptions batch map operations options +// +// Mirrors libbpf struct bpf_map_batch_opts +// Currently BPF_F_FLAG is the only supported +// flag (for ElemFlags). +type BatchOptions struct { + ElemFlags uint64 + Flags uint64 +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +// These constants can be used for the ProgramOptions.LogLevel field. +type LogLevel = sys.LogLevel + +const ( + // Print verifier state at branch points. + LogLevelBranch = sys.BPF_LOG_LEVEL1 + + // Print verifier state for every instruction. + // Available since Linux v5.2. + LogLevelInstruction = sys.BPF_LOG_LEVEL2 + + // Print verifier errors and stats at the end of the verification process. + // Available since Linux v5.2. 
+ LogLevelStats = sys.BPF_LOG_STATS +) diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go new file mode 100644 index 0000000000..ee60b5be5b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -0,0 +1,119 @@ +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedMap-0] + _ = x[Hash-1] + _ = x[Array-2] + _ = x[ProgramArray-3] + _ = x[PerfEventArray-4] + _ = x[PerCPUHash-5] + _ = x[PerCPUArray-6] + _ = x[StackTrace-7] + _ = x[CGroupArray-8] + _ = x[LRUHash-9] + _ = x[LRUCPUHash-10] + _ = x[LPMTrie-11] + _ = x[ArrayOfMaps-12] + _ = x[HashOfMaps-13] + _ = x[DevMap-14] + _ = x[SockMap-15] + _ = x[CPUMap-16] + _ = x[XSKMap-17] + _ = x[SockHash-18] + _ = x[CGroupStorage-19] + _ = x[ReusePortSockArray-20] + _ = x[PerCPUCGroupStorage-21] + _ = x[Queue-22] + _ = x[Stack-23] + _ = x[SkStorage-24] + _ = x[DevMapHash-25] + _ = x[StructOpsMap-26] + _ = x[RingBuf-27] + _ = x[InodeStorage-28] + _ = x[TaskStorage-29] +} + +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage" + +var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290} + +func (i MapType) String() string { + if i >= MapType(len(_MapType_index)-1) { + return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] +} +func _() { + 
// An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedProgram-0] + _ = x[SocketFilter-1] + _ = x[Kprobe-2] + _ = x[SchedCLS-3] + _ = x[SchedACT-4] + _ = x[TracePoint-5] + _ = x[XDP-6] + _ = x[PerfEvent-7] + _ = x[CGroupSKB-8] + _ = x[CGroupSock-9] + _ = x[LWTIn-10] + _ = x[LWTOut-11] + _ = x[LWTXmit-12] + _ = x[SockOps-13] + _ = x[SkSKB-14] + _ = x[CGroupDevice-15] + _ = x[SkMsg-16] + _ = x[RawTracepoint-17] + _ = x[CGroupSockAddr-18] + _ = x[LWTSeg6Local-19] + _ = x[LircMode2-20] + _ = x[SkReuseport-21] + _ = x[FlowDissector-22] + _ = x[CGroupSysctl-23] + _ = x[RawTracepointWritable-24] + _ = x[CGroupSockopt-25] + _ = x[Tracing-26] + _ = x[StructOps-27] + _ = x[Extension-28] + _ = x[LSM-29] + _ = x[SkLookup-30] + _ = x[Syscall-31] + _ = x[Netfilter-32] +} + +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + +func (i ProgramType) String() string { + if i >= ProgramType(len(_ProgramType_index)-1) { + return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PinNone-0] + _ = x[PinByName-1] +} + +const _PinType_name = "PinNonePinByName" + +var _PinType_index = [...]uint8{0, 7, 16} + +func (i PinType) String() string { + if i >= PinType(len(_PinType_index)-1) { + return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PinType_name[_PinType_index[i]:_PinType_index[i+1]] +} diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml new file mode 100644 index 0000000000..a695775df4 --- /dev/null +++ b/vendor/github.com/containerd/log/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE new file mode 100644 index 0000000000..584149b6ee --- /dev/null +++ b/vendor/github.com/containerd/log/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md new file mode 100644 index 0000000000..00e0849880 --- /dev/null +++ b/vendor/github.com/containerd/log/README.md @@ -0,0 +1,17 @@ +# log + +A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. + +This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. +In the future this package may be replaced with a common go logging interface. + +## Project details + +**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. + diff --git a/vendor/github.com/containerd/log/context.go b/vendor/github.com/containerd/log/context.go new file mode 100644 index 0000000000..20153066f3 --- /dev/null +++ b/vendor/github.com/containerd/log/context.go @@ -0,0 +1,182 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package log provides types and functions related to logging, passing +// loggers through a context, and attaching context to the logger. +// +// # Transitional types +// +// This package contains various types that are aliases for types in [logrus]. +// These aliases are intended for transitioning away from hard-coding logrus +// as logging implementation. Consumers of this package are encouraged to use +// the type-aliases from this package instead of directly using their logrus +// equivalent. +// +// The intent is to replace these aliases with locally defined types and +// interfaces once all consumers are no longer directly importing logrus +// types. +// +// IMPORTANT: due to the transitional purpose of this package, it is not +// guaranteed for the full logrus API to be provided in the future. As +// outlined, these aliases are provided as a step to transition away from +// a specific implementation which, as a result, exposes the full logrus API. +// While no decisions have been made on the ultimate design and interface +// provided by this package, we do not expect carrying "less common" features. +package log + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" +) + +// G is a shorthand for [GetLogger]. +// +// We may want to define this locally to a package to get package tagged log +// messages. +var G = GetLogger + +// L is an alias for the standard logger. +var L = &Entry{ + Logger: logrus.StandardLogger(), + // Default is three fields plus a little extra room. 
+ Data: make(Fields, 6), +} + +type loggerKey struct{} + +// Fields type to pass to "WithFields". +type Fields = map[string]any + +// Entry is a logging entry. It contains all the fields passed with +// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, +// Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +// +// Entry is a transitional type, and currently an alias for [logrus.Entry]. +type Entry = logrus.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// Level is a logging level. +type Level = logrus.Level + +// Supported log levels. +const ( + // TraceLevel level. Designates finer-grained informational events + // than [DebugLevel]. + TraceLevel Level = logrus.TraceLevel + + // DebugLevel level. Usually only enabled when debugging. Very verbose + // logging. + DebugLevel Level = logrus.DebugLevel + + // InfoLevel level. General operational entries about what's going on + // inside the application. + InfoLevel Level = logrus.InfoLevel + + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel Level = logrus.WarnLevel + + // ErrorLevel level. Logs errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel Level = logrus.ErrorLevel + + // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits + // even if the logging level is set to Panic. + FatalLevel Level = logrus.FatalLevel + + // PanicLevel level. This is the highest level of severity. Logs and + // then calls panic with the message passed to Debug, Info, ... + PanicLevel Level = logrus.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. 
+// +// level can be one of: +// +// - "trace" ([TraceLevel]) +// - "debug" ([DebugLevel]) +// - "info" ([InfoLevel]) +// - "warn" ([WarnLevel]) +// - "error" ([ErrorLevel]) +// - "fatal" ([FatalLevel]) +// - "panic" ([PanicLevel]) +func SetLevel(level string) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return err + } + + L.Logger.SetLevel(lvl) + return nil +} + +// GetLevel returns the current log level. +func GetLevel() Level { + return L.Logger.GetLevel() +} + +// OutputFormat specifies a log output format. +type OutputFormat string + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + TextFormat OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + JSONFormat OutputFormat = "json" +) + +// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). +func SetFormat(format OutputFormat) error { + switch format { + case TextFormat: + L.Logger.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: RFC3339NanoFixed, + FullTimestamp: true, + }) + return nil + case JSONFormat: + L.Logger.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: RFC3339NanoFixed, + }) + return nil + default: + return fmt.Errorf("unknown log format: %s", format) + } +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. 
+func GetLogger(ctx context.Context) *Entry { + if logger := ctx.Value(loggerKey{}); logger != nil { + return logger.(*Entry) + } + return L.WithContext(ctx) +} diff --git a/vendor/github.com/containerd/nri/LICENSE b/vendor/github.com/containerd/nri/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/containerd/nri/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/nri/pkg/api/adjustment.go b/vendor/github.com/containerd/nri/pkg/api/adjustment.go new file mode 100644 index 0000000000..74cd725dbc --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/adjustment.go @@ -0,0 +1,334 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +// +// Notes: +// Adjustment of metadata that is stored in maps (labels and annotations) +// currently assumes that a single plugin will never do an add prior to a +// delete for any key. IOW, it is always assumed that if both a deletion +// and an addition/setting was recorded for a key then the final desired +// state is the addition. This seems like a reasonably safe assumption. 
A +// removal is usually done only to protect against triggering the conflict +// in the runtime when a plugin intends to touch a key which is known to +// have been put there or already modified by another plugin. +// +// An alternative without this implicit ordering assumption would be to +// store the adjustment for such data as a sequence of add/del operations +// in a slice. At the moment that does not seem to be necessary. +// + +// AddAnnotation records the addition of the annotation key=value. +func (a *ContainerAdjustment) AddAnnotation(key, value string) { + a.initAnnotations() + a.Annotations[key] = value +} + +// RemoveAnnotation records the removal of the annotation for the given key. +// Normally it is an error for a plugin to try and alter an annotation +// touched by another plugin. However, this is not an error if the plugin +// removes that annotation prior to touching it. +func (a *ContainerAdjustment) RemoveAnnotation(key string) { + a.initAnnotations() + a.Annotations[MarkForRemoval(key)] = "" +} + +// AddMount records the addition of a mount to a container. +func (a *ContainerAdjustment) AddMount(m *Mount) { + a.Mounts = append(a.Mounts, m) // TODO: should we dup m here ? +} + +// RemoveMount records the removal of a mount from a container. +// Normally it is an error for a plugin to try and alter a mount +// touched by another plugin. However, this is not an error if the +// plugin removes that mount prior to touching it. +func (a *ContainerAdjustment) RemoveMount(ContainerPath string) { + a.Mounts = append(a.Mounts, &Mount{ + Destination: MarkForRemoval(ContainerPath), + }) +} + +// AddEnv records the addition of an environment variable to a container. +func (a *ContainerAdjustment) AddEnv(key, value string) { + a.Env = append(a.Env, &KeyValue{ + Key: key, + Value: value, + }) +} + +// RemoveEnv records the removal of an environment variable from a container. 
+// Normally it is an error for a plugin to try and alter an environment +// variable touched by another container. However, this is not an error if +// the plugin removes that variable prior to touching it. +func (a *ContainerAdjustment) RemoveEnv(key string) { + a.Env = append(a.Env, &KeyValue{ + Key: MarkForRemoval(key), + }) +} + +// AddHooks records the addition of the given hooks to a container. +func (a *ContainerAdjustment) AddHooks(h *Hooks) { + a.initHooks() + if h.Prestart != nil { + a.Hooks.Prestart = append(a.Hooks.Prestart, h.Prestart...) + } + if h.CreateRuntime != nil { + a.Hooks.CreateRuntime = append(a.Hooks.CreateRuntime, h.CreateRuntime...) + } + if h.CreateContainer != nil { + a.Hooks.CreateContainer = append(a.Hooks.CreateContainer, h.CreateContainer...) + } + if h.StartContainer != nil { + a.Hooks.StartContainer = append(a.Hooks.StartContainer, h.StartContainer...) + } + if h.Poststart != nil { + a.Hooks.Poststart = append(a.Hooks.Poststart, h.Poststart...) + } + if h.Poststop != nil { + a.Hooks.Poststop = append(a.Hooks.Poststop, h.Poststop...) + } +} + +func (a *ContainerAdjustment) AddRlimit(typ string, hard, soft uint64) { + a.initRlimits() + a.Rlimits = append(a.Rlimits, &POSIXRlimit{ + Type: typ, + Hard: hard, + Soft: soft, + }) +} + +// AddDevice records the addition of the given device to a container. +func (a *ContainerAdjustment) AddDevice(d *LinuxDevice) { + a.initLinux() + a.Linux.Devices = append(a.Linux.Devices, d) // TODO: should we dup d here ? +} + +// RemoveDevice records the removal of a device from a container. +// Normally it is an error for a plugin to try and alter an device +// touched by another container. However, this is not an error if +// the plugin removes that device prior to touching it. 
+func (a *ContainerAdjustment) RemoveDevice(path string) { + a.initLinux() + a.Linux.Devices = append(a.Linux.Devices, &LinuxDevice{ + Path: MarkForRemoval(path), + }) +} + +// AddCDIDevice records the addition of the given CDI device to a container. +func (a *ContainerAdjustment) AddCDIDevice(d *CDIDevice) { + a.CDIDevices = append(a.CDIDevices, d) // TODO: should we dup d here ? +} + +// SetLinuxMemoryLimit records setting the memory limit for a container. +func (a *ContainerAdjustment) SetLinuxMemoryLimit(value int64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.Limit = Int64(value) +} + +// SetLinuxMemoryReservation records setting the memory reservation for a container. +func (a *ContainerAdjustment) SetLinuxMemoryReservation(value int64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.Reservation = Int64(value) +} + +// SetLinuxMemorySwap records records setting the memory swap limit for a container. +func (a *ContainerAdjustment) SetLinuxMemorySwap(value int64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.Swap = Int64(value) +} + +// SetLinuxMemoryKernel records setting the memory kernel limit for a container. +func (a *ContainerAdjustment) SetLinuxMemoryKernel(value int64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.Kernel = Int64(value) +} + +// SetLinuxMemoryKernelTCP records setting the memory kernel TCP limit for a container. +func (a *ContainerAdjustment) SetLinuxMemoryKernelTCP(value int64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.KernelTcp = Int64(value) +} + +// SetLinuxMemorySwappiness records setting the memory swappiness for a container. +func (a *ContainerAdjustment) SetLinuxMemorySwappiness(value uint64) { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.Swappiness = UInt64(value) +} + +// SetLinuxMemoryDisableOomKiller records disabling the OOM killer for a container. 
+func (a *ContainerAdjustment) SetLinuxMemoryDisableOomKiller() { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.DisableOomKiller = Bool(true) +} + +// SetLinuxMemoryUseHierarchy records enabling hierarchical memory accounting for a container. +func (a *ContainerAdjustment) SetLinuxMemoryUseHierarchy() { + a.initLinuxResourcesMemory() + a.Linux.Resources.Memory.UseHierarchy = Bool(true) +} + +// SetLinuxCPUShares records setting the scheduler's CPU shares for a container. +func (a *ContainerAdjustment) SetLinuxCPUShares(value uint64) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.Shares = UInt64(value) +} + +// SetLinuxCPUQuota records setting the scheduler's CPU quota for a container. +func (a *ContainerAdjustment) SetLinuxCPUQuota(value int64) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.Quota = Int64(value) +} + +// SetLinuxCPUPeriod records setting the scheduler's CPU period for a container. +func (a *ContainerAdjustment) SetLinuxCPUPeriod(value int64) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.Period = UInt64(value) +} + +// SetLinuxCPURealtimeRuntime records setting the scheduler's realtime runtime for a container. +func (a *ContainerAdjustment) SetLinuxCPURealtimeRuntime(value int64) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.RealtimeRuntime = Int64(value) +} + +// SetLinuxCPURealtimePeriod records setting the scheduler's realtime period for a container. +func (a *ContainerAdjustment) SetLinuxCPURealtimePeriod(value uint64) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.RealtimePeriod = UInt64(value) +} + +// SetLinuxCPUSetCPUs records setting the cpuset CPUs for a container. +func (a *ContainerAdjustment) SetLinuxCPUSetCPUs(value string) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.Cpus = value +} + +// SetLinuxCPUSetMems records setting the cpuset memory for a container. 
+func (a *ContainerAdjustment) SetLinuxCPUSetMems(value string) { + a.initLinuxResourcesCPU() + a.Linux.Resources.Cpu.Mems = value +} + +// SetLinuxPidLimits records setting the pid max number for a container. +func (a *ContainerAdjustment) SetLinuxPidLimits(value int64) { + a.initLinuxResourcesPids() + a.Linux.Resources.Pids.Limit = value +} + +// AddLinuxHugepageLimit records adding a hugepage limit for a container. +func (a *ContainerAdjustment) AddLinuxHugepageLimit(pageSize string, value uint64) { + a.initLinuxResources() + a.Linux.Resources.HugepageLimits = append(a.Linux.Resources.HugepageLimits, + &HugepageLimit{ + PageSize: pageSize, + Limit: value, + }) +} + +// SetLinuxBlockIOClass records setting the Block I/O class for a container. +func (a *ContainerAdjustment) SetLinuxBlockIOClass(value string) { + a.initLinuxResources() + a.Linux.Resources.BlockioClass = String(value) +} + +// SetLinuxRDTClass records setting the RDT class for a container. +func (a *ContainerAdjustment) SetLinuxRDTClass(value string) { + a.initLinuxResources() + a.Linux.Resources.RdtClass = String(value) +} + +// AddLinuxUnified sets a cgroupv2 unified resource. +func (a *ContainerAdjustment) AddLinuxUnified(key, value string) { + a.initLinuxResourcesUnified() + a.Linux.Resources.Unified[key] = value +} + +// SetLinuxCgroupsPath records setting the cgroups path for a container. +func (a *ContainerAdjustment) SetLinuxCgroupsPath(value string) { + a.initLinux() + a.Linux.CgroupsPath = value +} + +// SetLinuxOomScoreAdj records setting the kernel's Out-Of-Memory (OOM) killer score for a container. +func (a *ContainerAdjustment) SetLinuxOomScoreAdj(value *int) { + a.initLinux() + a.Linux.OomScoreAdj = Int(value) // using Int(value) from ./options.go to optionally allocate a pointer to normalized copy of value +} + +// +// Initializing a container adjustment and container update. 
+//
+// initAnnotations lazily allocates the Annotations map so setters work on a zero value.
+func (a *ContainerAdjustment) initAnnotations() {
+	if a.Annotations == nil {
+		a.Annotations = make(map[string]string)
+	}
+}
+
+// initHooks lazily allocates the Hooks struct.
+func (a *ContainerAdjustment) initHooks() {
+	if a.Hooks == nil {
+		a.Hooks = &Hooks{}
+	}
+}
+
+// initRlimits ensures the Rlimits slice is non-nil before appending.
+func (a *ContainerAdjustment) initRlimits() {
+	if a.Rlimits == nil {
+		a.Rlimits = []*POSIXRlimit{}
+	}
+}
+
+// initLinux lazily allocates the Linux adjustment struct.
+func (a *ContainerAdjustment) initLinux() {
+	if a.Linux == nil {
+		a.Linux = &LinuxContainerAdjustment{}
+	}
+}
+
+// initLinuxResources ensures Linux and Linux.Resources are both allocated.
+func (a *ContainerAdjustment) initLinuxResources() {
+	a.initLinux()
+	if a.Linux.Resources == nil {
+		a.Linux.Resources = &LinuxResources{}
+	}
+}
+
+// initLinuxResourcesMemory ensures Resources.Memory is allocated.
+func (a *ContainerAdjustment) initLinuxResourcesMemory() {
+	a.initLinuxResources()
+	if a.Linux.Resources.Memory == nil {
+		a.Linux.Resources.Memory = &LinuxMemory{}
+	}
+}
+
+// initLinuxResourcesCPU ensures Resources.Cpu is allocated.
+func (a *ContainerAdjustment) initLinuxResourcesCPU() {
+	a.initLinuxResources()
+	if a.Linux.Resources.Cpu == nil {
+		a.Linux.Resources.Cpu = &LinuxCPU{}
+	}
+}
+
+// initLinuxResourcesPids ensures Resources.Pids is allocated.
+func (a *ContainerAdjustment) initLinuxResourcesPids() {
+	a.initLinuxResources()
+	if a.Linux.Resources.Pids == nil {
+		a.Linux.Resources.Pids = &LinuxPids{}
+	}
+}
+
+// initLinuxResourcesUnified ensures the Resources.Unified map is allocated.
+func (a *ContainerAdjustment) initLinuxResourcesUnified() {
+	a.initLinuxResources()
+	if a.Linux.Resources.Unified == nil {
+		a.Linux.Resources.Unified = make(map[string]string)
+	}
+}
diff --git a/vendor/github.com/containerd/nri/pkg/api/api.pb.go b/vendor/github.com/containerd/nri/pkg/api/api.pb.go
new file mode 100644
index 0000000000..723b47e520
--- /dev/null
+++ b/vendor/github.com/containerd/nri/pkg/api/api.pb.go
@@ -0,0 +1,4765 @@
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Events that plugins can subscribe to in ConfigureResponse. +type Event int32 + +const ( + Event_UNKNOWN Event = 0 + Event_RUN_POD_SANDBOX Event = 1 + Event_STOP_POD_SANDBOX Event = 2 + Event_REMOVE_POD_SANDBOX Event = 3 + Event_CREATE_CONTAINER Event = 4 + Event_POST_CREATE_CONTAINER Event = 5 + Event_START_CONTAINER Event = 6 + Event_POST_START_CONTAINER Event = 7 + Event_UPDATE_CONTAINER Event = 8 + Event_POST_UPDATE_CONTAINER Event = 9 + Event_STOP_CONTAINER Event = 10 + Event_REMOVE_CONTAINER Event = 11 + Event_LAST Event = 12 +) + +// Enum value maps for Event. 
+var ( + Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RUN_POD_SANDBOX", + 2: "STOP_POD_SANDBOX", + 3: "REMOVE_POD_SANDBOX", + 4: "CREATE_CONTAINER", + 5: "POST_CREATE_CONTAINER", + 6: "START_CONTAINER", + 7: "POST_START_CONTAINER", + 8: "UPDATE_CONTAINER", + 9: "POST_UPDATE_CONTAINER", + 10: "STOP_CONTAINER", + 11: "REMOVE_CONTAINER", + 12: "LAST", + } + Event_value = map[string]int32{ + "UNKNOWN": 0, + "RUN_POD_SANDBOX": 1, + "STOP_POD_SANDBOX": 2, + "REMOVE_POD_SANDBOX": 3, + "CREATE_CONTAINER": 4, + "POST_CREATE_CONTAINER": 5, + "START_CONTAINER": 6, + "POST_START_CONTAINER": 7, + "UPDATE_CONTAINER": 8, + "POST_UPDATE_CONTAINER": 9, + "STOP_CONTAINER": 10, + "REMOVE_CONTAINER": 11, + "LAST": 12, + } +) + +func (x Event) Enum() *Event { + p := new(Event) + *p = x + return p +} + +func (x Event) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Event) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_api_api_proto_enumTypes[0].Descriptor() +} + +func (Event) Type() protoreflect.EnumType { + return &file_pkg_api_api_proto_enumTypes[0] +} + +func (x Event) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Event.Descriptor instead. +func (Event) EnumDescriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{0} +} + +// Possible container states. +type ContainerState int32 + +const ( + ContainerState_CONTAINER_UNKNOWN ContainerState = 0 + ContainerState_CONTAINER_CREATED ContainerState = 1 + ContainerState_CONTAINER_PAUSED ContainerState = 2 // is this useful/necessary ? + ContainerState_CONTAINER_RUNNING ContainerState = 3 + ContainerState_CONTAINER_STOPPED ContainerState = 4 +) + +// Enum value maps for ContainerState. 
+var ( + ContainerState_name = map[int32]string{ + 0: "CONTAINER_UNKNOWN", + 1: "CONTAINER_CREATED", + 2: "CONTAINER_PAUSED", + 3: "CONTAINER_RUNNING", + 4: "CONTAINER_STOPPED", + } + ContainerState_value = map[string]int32{ + "CONTAINER_UNKNOWN": 0, + "CONTAINER_CREATED": 1, + "CONTAINER_PAUSED": 2, + "CONTAINER_RUNNING": 3, + "CONTAINER_STOPPED": 4, + } +) + +func (x ContainerState) Enum() *ContainerState { + p := new(ContainerState) + *p = x + return p +} + +func (x ContainerState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ContainerState) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_api_api_proto_enumTypes[1].Descriptor() +} + +func (ContainerState) Type() protoreflect.EnumType { + return &file_pkg_api_api_proto_enumTypes[1] +} + +func (x ContainerState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ContainerState.Descriptor instead. +func (ContainerState) EnumDescriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{1} +} + +type LogRequest_Level int32 + +const ( + LogRequest_LEVEL_UNSPECIFIED LogRequest_Level = 0 + LogRequest_LEVEL_DEBUG LogRequest_Level = 1 + LogRequest_LEVEL_INFO LogRequest_Level = 2 + LogRequest_LEVEL_WARN LogRequest_Level = 3 + LogRequest_LEVEL_ERROR LogRequest_Level = 4 +) + +// Enum value maps for LogRequest_Level. 
+var ( + LogRequest_Level_name = map[int32]string{ + 0: "LEVEL_UNSPECIFIED", + 1: "LEVEL_DEBUG", + 2: "LEVEL_INFO", + 3: "LEVEL_WARN", + 4: "LEVEL_ERROR", + } + LogRequest_Level_value = map[string]int32{ + "LEVEL_UNSPECIFIED": 0, + "LEVEL_DEBUG": 1, + "LEVEL_INFO": 2, + "LEVEL_WARN": 3, + "LEVEL_ERROR": 4, + } +) + +func (x LogRequest_Level) Enum() *LogRequest_Level { + p := new(LogRequest_Level) + *p = x + return p +} + +func (x LogRequest_Level) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LogRequest_Level) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_api_api_proto_enumTypes[2].Descriptor() +} + +func (LogRequest_Level) Type() protoreflect.EnumType { + return &file_pkg_api_api_proto_enumTypes[2] +} + +func (x LogRequest_Level) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LogRequest_Level.Descriptor instead. +func (LogRequest_Level) EnumDescriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{3, 0} +} + +type RegisterPluginRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the plugin to register. + PluginName string `protobuf:"bytes,1,opt,name=plugin_name,json=pluginName,proto3" json:"plugin_name,omitempty"` + // Plugin invocation index. Plugins are called in ascending index order. 
+ PluginIdx string `protobuf:"bytes,2,opt,name=plugin_idx,json=pluginIdx,proto3" json:"plugin_idx,omitempty"` +} + +func (x *RegisterPluginRequest) Reset() { + *x = RegisterPluginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegisterPluginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterPluginRequest) ProtoMessage() {} + +func (x *RegisterPluginRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterPluginRequest.ProtoReflect.Descriptor instead. +func (*RegisterPluginRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{0} +} + +func (x *RegisterPluginRequest) GetPluginName() string { + if x != nil { + return x.PluginName + } + return "" +} + +func (x *RegisterPluginRequest) GetPluginIdx() string { + if x != nil { + return x.PluginIdx + } + return "" +} + +type UpdateContainersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of containers to update. + Update []*ContainerUpdate `protobuf:"bytes,1,rep,name=update,proto3" json:"update,omitempty"` + // List of containers to evict. 
+ Evict []*ContainerEviction `protobuf:"bytes,2,rep,name=evict,proto3" json:"evict,omitempty"` +} + +func (x *UpdateContainersRequest) Reset() { + *x = UpdateContainersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateContainersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateContainersRequest) ProtoMessage() {} + +func (x *UpdateContainersRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainersRequest.ProtoReflect.Descriptor instead. +func (*UpdateContainersRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{1} +} + +func (x *UpdateContainersRequest) GetUpdate() []*ContainerUpdate { + if x != nil { + return x.Update + } + return nil +} + +func (x *UpdateContainersRequest) GetEvict() []*ContainerEviction { + if x != nil { + return x.Evict + } + return nil +} + +type UpdateContainersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Containers that the runtime failed to update. 
+ Failed []*ContainerUpdate `protobuf:"bytes,1,rep,name=failed,proto3" json:"failed,omitempty"` +} + +func (x *UpdateContainersResponse) Reset() { + *x = UpdateContainersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateContainersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateContainersResponse) ProtoMessage() {} + +func (x *UpdateContainersResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainersResponse.ProtoReflect.Descriptor instead. +func (*UpdateContainersResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateContainersResponse) GetFailed() []*ContainerUpdate { + if x != nil { + return x.Failed + } + return nil +} + +type LogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` + Level LogRequest_Level `protobuf:"varint,2,opt,name=level,proto3,enum=nri.pkg.api.v1alpha1.LogRequest_Level" json:"level,omitempty"` +} + +func (x *LogRequest) Reset() { + *x = LogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogRequest) ProtoMessage() {} + +func (x *LogRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogRequest.ProtoReflect.Descriptor instead. +func (*LogRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{3} +} + +func (x *LogRequest) GetMsg() string { + if x != nil { + return x.Msg + } + return "" +} + +func (x *LogRequest) GetLevel() LogRequest_Level { + if x != nil { + return x.Level + } + return LogRequest_LEVEL_UNSPECIFIED +} + +type ConfigureRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Any plugin-specific data, if present among the NRI configuration. + Config string `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Name of the runtime NRI is running in. + RuntimeName string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName,proto3" json:"runtime_name,omitempty"` + // Version of the runtime NRI is running in. + RuntimeVersion string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion,proto3" json:"runtime_version,omitempty"` + // Configured registration timeout in milliseconds. + RegistrationTimeout int64 `protobuf:"varint,4,opt,name=registration_timeout,json=registrationTimeout,proto3" json:"registration_timeout,omitempty"` + // Configured request processing timeout in milliseconds. 
+ RequestTimeout int64 `protobuf:"varint,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` +} + +func (x *ConfigureRequest) Reset() { + *x = ConfigureRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureRequest) ProtoMessage() {} + +func (x *ConfigureRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureRequest.ProtoReflect.Descriptor instead. +func (*ConfigureRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{4} +} + +func (x *ConfigureRequest) GetConfig() string { + if x != nil { + return x.Config + } + return "" +} + +func (x *ConfigureRequest) GetRuntimeName() string { + if x != nil { + return x.RuntimeName + } + return "" +} + +func (x *ConfigureRequest) GetRuntimeVersion() string { + if x != nil { + return x.RuntimeVersion + } + return "" +} + +func (x *ConfigureRequest) GetRegistrationTimeout() int64 { + if x != nil { + return x.RegistrationTimeout + } + return 0 +} + +func (x *ConfigureRequest) GetRequestTimeout() int64 { + if x != nil { + return x.RequestTimeout + } + return 0 +} + +type ConfigureResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Events to subscribe the plugin for. Each bit set corresponds to an + // enumerated Event. 
+ Events int32 `protobuf:"varint,2,opt,name=events,proto3" json:"events,omitempty"` +} + +func (x *ConfigureResponse) Reset() { + *x = ConfigureResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureResponse) ProtoMessage() {} + +func (x *ConfigureResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureResponse.ProtoReflect.Descriptor instead. +func (*ConfigureResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{5} +} + +func (x *ConfigureResponse) GetEvents() int32 { + if x != nil { + return x.Events + } + return 0 +} + +type SynchronizeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pods known to the runtime. + Pods []*PodSandbox `protobuf:"bytes,1,rep,name=pods,proto3" json:"pods,omitempty"` + // Containers known to the runtime. + Containers []*Container `protobuf:"bytes,2,rep,name=containers,proto3" json:"containers,omitempty"` + // Whether there are more pods and containers to follow. 
+ More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` +} + +func (x *SynchronizeRequest) Reset() { + *x = SynchronizeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SynchronizeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SynchronizeRequest) ProtoMessage() {} + +func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead. +func (*SynchronizeRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{6} +} + +func (x *SynchronizeRequest) GetPods() []*PodSandbox { + if x != nil { + return x.Pods + } + return nil +} + +func (x *SynchronizeRequest) GetContainers() []*Container { + if x != nil { + return x.Containers + } + return nil +} + +func (x *SynchronizeRequest) GetMore() bool { + if x != nil { + return x.More + } + return false +} + +type SynchronizeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Updates to containers requested by the plugin. + Update []*ContainerUpdate `protobuf:"bytes,1,rep,name=update,proto3" json:"update,omitempty"` + // Whether the client is able to handle more advertised pods and containers. 
+ More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` +} + +func (x *SynchronizeResponse) Reset() { + *x = SynchronizeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SynchronizeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SynchronizeResponse) ProtoMessage() {} + +func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead. +func (*SynchronizeResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{7} +} + +func (x *SynchronizeResponse) GetUpdate() []*ContainerUpdate { + if x != nil { + return x.Update + } + return nil +} + +func (x *SynchronizeResponse) GetMore() bool { + if x != nil { + return x.More + } + return false +} + +type CreateContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pod of container being created. + Pod *PodSandbox `protobuf:"bytes,1,opt,name=pod,proto3" json:"pod,omitempty"` + // Container being created. 
+ Container *Container `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` +} + +func (x *CreateContainerRequest) Reset() { + *x = CreateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateContainerRequest) ProtoMessage() {} + +func (x *CreateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateContainerRequest.ProtoReflect.Descriptor instead. +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{8} +} + +func (x *CreateContainerRequest) GetPod() *PodSandbox { + if x != nil { + return x.Pod + } + return nil +} + +func (x *CreateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +type CreateContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested adjustments to container being created. + Adjust *ContainerAdjustment `protobuf:"bytes,1,opt,name=adjust,proto3" json:"adjust,omitempty"` + // Requested updates to other existing containers. + Update []*ContainerUpdate `protobuf:"bytes,2,rep,name=update,proto3" json:"update,omitempty"` + // Requested eviction of existing containers. 
+ Evict []*ContainerEviction `protobuf:"bytes,3,rep,name=evict,proto3" json:"evict,omitempty"` +} + +func (x *CreateContainerResponse) Reset() { + *x = CreateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateContainerResponse) ProtoMessage() {} + +func (x *CreateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateContainerResponse.ProtoReflect.Descriptor instead. +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateContainerResponse) GetAdjust() *ContainerAdjustment { + if x != nil { + return x.Adjust + } + return nil +} + +func (x *CreateContainerResponse) GetUpdate() []*ContainerUpdate { + if x != nil { + return x.Update + } + return nil +} + +func (x *CreateContainerResponse) GetEvict() []*ContainerEviction { + if x != nil { + return x.Evict + } + return nil +} + +type UpdateContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pod of container being updated. + Pod *PodSandbox `protobuf:"bytes,1,opt,name=pod,proto3" json:"pod,omitempty"` + // Container being updated. + Container *Container `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` + // Resources to update. 
+ LinuxResources *LinuxResources `protobuf:"bytes,3,opt,name=linux_resources,json=linuxResources,proto3" json:"linux_resources,omitempty"` +} + +func (x *UpdateContainerRequest) Reset() { + *x = UpdateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateContainerRequest) ProtoMessage() {} + +func (x *UpdateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainerRequest.ProtoReflect.Descriptor instead. +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateContainerRequest) GetPod() *PodSandbox { + if x != nil { + return x.Pod + } + return nil +} + +func (x *UpdateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +func (x *UpdateContainerRequest) GetLinuxResources() *LinuxResources { + if x != nil { + return x.LinuxResources + } + return nil +} + +type UpdateContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested updates to containers. + Update []*ContainerUpdate `protobuf:"bytes,1,rep,name=update,proto3" json:"update,omitempty"` + // Requested eviction of containers. 
+ Evict []*ContainerEviction `protobuf:"bytes,2,rep,name=evict,proto3" json:"evict,omitempty"` +} + +func (x *UpdateContainerResponse) Reset() { + *x = UpdateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateContainerResponse) ProtoMessage() {} + +func (x *UpdateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainerResponse.ProtoReflect.Descriptor instead. +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{11} +} + +func (x *UpdateContainerResponse) GetUpdate() []*ContainerUpdate { + if x != nil { + return x.Update + } + return nil +} + +func (x *UpdateContainerResponse) GetEvict() []*ContainerEviction { + if x != nil { + return x.Evict + } + return nil +} + +type StopContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pod of container being stopped. + Pod *PodSandbox `protobuf:"bytes,1,opt,name=pod,proto3" json:"pod,omitempty"` + // Container being stopped. 
+ Container *Container `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` +} + +func (x *StopContainerRequest) Reset() { + *x = StopContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopContainerRequest) ProtoMessage() {} + +func (x *StopContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopContainerRequest.ProtoReflect.Descriptor instead. +func (*StopContainerRequest) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{12} +} + +func (x *StopContainerRequest) GetPod() *PodSandbox { + if x != nil { + return x.Pod + } + return nil +} + +func (x *StopContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +type StopContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested updates to containers. 
+ Update []*ContainerUpdate `protobuf:"bytes,1,rep,name=update,proto3" json:"update,omitempty"` +} + +func (x *StopContainerResponse) Reset() { + *x = StopContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopContainerResponse) ProtoMessage() {} + +func (x *StopContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopContainerResponse.ProtoReflect.Descriptor instead. +func (*StopContainerResponse) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{13} +} + +func (x *StopContainerResponse) GetUpdate() []*ContainerUpdate { + if x != nil { + return x.Update + } + return nil +} + +type StateChangeEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Event type of notification. + Event Event `protobuf:"varint,1,opt,name=event,proto3,enum=nri.pkg.api.v1alpha1.Event" json:"event,omitempty"` + // Pod this notification is sent for. If this event is related to a container, + // pod is set to the pod of the container. + Pod *PodSandbox `protobuf:"bytes,2,opt,name=pod,proto3" json:"pod,omitempty"` + // Container this notification is sent for. If the event is related to a pod, + // container is nil. 
+ Container *Container `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` +} + +func (x *StateChangeEvent) Reset() { + *x = StateChangeEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateChangeEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateChangeEvent) ProtoMessage() {} + +func (x *StateChangeEvent) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateChangeEvent.ProtoReflect.Descriptor instead. +func (*StateChangeEvent) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{14} +} + +func (x *StateChangeEvent) GetEvent() Event { + if x != nil { + return x.Event + } + return Event_UNKNOWN +} + +func (x *StateChangeEvent) GetPod() *PodSandbox { + if x != nil { + return x.Pod + } + return nil +} + +func (x *StateChangeEvent) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +// Empty response for those *Requests that are semantically events. 
+type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{15} +} + +// Pod metadata that is considered relevant for a plugin. +type PodSandbox struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Uid string `protobuf:"bytes,3,opt,name=uid,proto3" json:"uid,omitempty"` + Namespace string `protobuf:"bytes,4,opt,name=namespace,proto3" json:"namespace,omitempty"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RuntimeHandler string `protobuf:"bytes,7,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"` + Linux *LinuxPodSandbox 
`protobuf:"bytes,8,opt,name=linux,proto3" json:"linux,omitempty"` + Pid uint32 `protobuf:"varint,9,opt,name=pid,proto3" json:"pid,omitempty"` // for NRI v1 emulation + Ips []string `protobuf:"bytes,10,rep,name=ips,proto3" json:"ips,omitempty"` +} + +func (x *PodSandbox) Reset() { + *x = PodSandbox{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PodSandbox) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PodSandbox) ProtoMessage() {} + +func (x *PodSandbox) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PodSandbox.ProtoReflect.Descriptor instead. +func (*PodSandbox) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{16} +} + +func (x *PodSandbox) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *PodSandbox) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PodSandbox) GetUid() string { + if x != nil { + return x.Uid + } + return "" +} + +func (x *PodSandbox) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PodSandbox) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *PodSandbox) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *PodSandbox) GetRuntimeHandler() string { + if x != nil { + return x.RuntimeHandler + } + return "" +} + +func (x *PodSandbox) GetLinux() *LinuxPodSandbox { + if x != nil { + return x.Linux + } + return nil +} + +func (x *PodSandbox) GetPid() uint32 { + if x != nil { + return x.Pid + } + 
return 0 +} + +func (x *PodSandbox) GetIps() []string { + if x != nil { + return x.Ips + } + return nil +} + +// PodSandbox linux-specific metadata +type LinuxPodSandbox struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PodOverhead *LinuxResources `protobuf:"bytes,1,opt,name=pod_overhead,json=podOverhead,proto3" json:"pod_overhead,omitempty"` + PodResources *LinuxResources `protobuf:"bytes,2,opt,name=pod_resources,json=podResources,proto3" json:"pod_resources,omitempty"` + CgroupParent string `protobuf:"bytes,3,opt,name=cgroup_parent,json=cgroupParent,proto3" json:"cgroup_parent,omitempty"` + CgroupsPath string `protobuf:"bytes,4,opt,name=cgroups_path,json=cgroupsPath,proto3" json:"cgroups_path,omitempty"` // for NRI v1 emulation + Namespaces []*LinuxNamespace `protobuf:"bytes,5,rep,name=namespaces,proto3" json:"namespaces,omitempty"` // for NRI v1 emulation + Resources *LinuxResources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources,omitempty"` // for NRI v1 emulation +} + +func (x *LinuxPodSandbox) Reset() { + *x = LinuxPodSandbox{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxPodSandbox) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxPodSandbox) ProtoMessage() {} + +func (x *LinuxPodSandbox) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxPodSandbox.ProtoReflect.Descriptor instead. 
+func (*LinuxPodSandbox) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{17} +} + +func (x *LinuxPodSandbox) GetPodOverhead() *LinuxResources { + if x != nil { + return x.PodOverhead + } + return nil +} + +func (x *LinuxPodSandbox) GetPodResources() *LinuxResources { + if x != nil { + return x.PodResources + } + return nil +} + +func (x *LinuxPodSandbox) GetCgroupParent() string { + if x != nil { + return x.CgroupParent + } + return "" +} + +func (x *LinuxPodSandbox) GetCgroupsPath() string { + if x != nil { + return x.CgroupsPath + } + return "" +} + +func (x *LinuxPodSandbox) GetNamespaces() []*LinuxNamespace { + if x != nil { + return x.Namespaces + } + return nil +} + +func (x *LinuxPodSandbox) GetResources() *LinuxResources { + if x != nil { + return x.Resources + } + return nil +} + +// Container metadata that is considered relevant for a plugin. +type Container struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + PodSandboxId string `protobuf:"bytes,2,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + State ContainerState `protobuf:"varint,4,opt,name=state,proto3,enum=nri.pkg.api.v1alpha1.ContainerState" json:"state,omitempty"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Args []string `protobuf:"bytes,7,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,8,rep,name=env,proto3" json:"env,omitempty"` + Mounts []*Mount 
`protobuf:"bytes,9,rep,name=mounts,proto3" json:"mounts,omitempty"` + Hooks *Hooks `protobuf:"bytes,10,opt,name=hooks,proto3" json:"hooks,omitempty"` + Linux *LinuxContainer `protobuf:"bytes,11,opt,name=linux,proto3" json:"linux,omitempty"` + Pid uint32 `protobuf:"varint,12,opt,name=pid,proto3" json:"pid,omitempty"` // for NRI v1 emulation + Rlimits []*POSIXRlimit `protobuf:"bytes,13,rep,name=rlimits,proto3" json:"rlimits,omitempty"` +} + +func (x *Container) Reset() { + *x = Container{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Container) ProtoMessage() {} + +func (x *Container) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Container.ProtoReflect.Descriptor instead. 
+func (*Container) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{18} +} + +func (x *Container) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Container) GetPodSandboxId() string { + if x != nil { + return x.PodSandboxId + } + return "" +} + +func (x *Container) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Container) GetState() ContainerState { + if x != nil { + return x.State + } + return ContainerState_CONTAINER_UNKNOWN +} + +func (x *Container) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Container) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Container) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +func (x *Container) GetEnv() []string { + if x != nil { + return x.Env + } + return nil +} + +func (x *Container) GetMounts() []*Mount { + if x != nil { + return x.Mounts + } + return nil +} + +func (x *Container) GetHooks() *Hooks { + if x != nil { + return x.Hooks + } + return nil +} + +func (x *Container) GetLinux() *LinuxContainer { + if x != nil { + return x.Linux + } + return nil +} + +func (x *Container) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *Container) GetRlimits() []*POSIXRlimit { + if x != nil { + return x.Rlimits + } + return nil +} + +// A container mount. 
+type Mount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` + Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` +} + +func (x *Mount) Reset() { + *x = Mount{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mount) ProtoMessage() {} + +func (x *Mount) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mount.ProtoReflect.Descriptor instead. +func (*Mount) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{19} +} + +func (x *Mount) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +func (x *Mount) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Mount) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *Mount) GetOptions() []string { + if x != nil { + return x.Options + } + return nil +} + +// Container OCI hooks. 
+type Hooks struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Prestart []*Hook `protobuf:"bytes,1,rep,name=prestart,proto3" json:"prestart,omitempty"` + CreateRuntime []*Hook `protobuf:"bytes,2,rep,name=create_runtime,json=createRuntime,proto3" json:"create_runtime,omitempty"` + CreateContainer []*Hook `protobuf:"bytes,3,rep,name=create_container,json=createContainer,proto3" json:"create_container,omitempty"` + StartContainer []*Hook `protobuf:"bytes,4,rep,name=start_container,json=startContainer,proto3" json:"start_container,omitempty"` + Poststart []*Hook `protobuf:"bytes,5,rep,name=poststart,proto3" json:"poststart,omitempty"` + Poststop []*Hook `protobuf:"bytes,6,rep,name=poststop,proto3" json:"poststop,omitempty"` +} + +func (x *Hooks) Reset() { + *x = Hooks{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Hooks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Hooks) ProtoMessage() {} + +func (x *Hooks) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Hooks.ProtoReflect.Descriptor instead. 
+func (*Hooks) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{20} +} + +func (x *Hooks) GetPrestart() []*Hook { + if x != nil { + return x.Prestart + } + return nil +} + +func (x *Hooks) GetCreateRuntime() []*Hook { + if x != nil { + return x.CreateRuntime + } + return nil +} + +func (x *Hooks) GetCreateContainer() []*Hook { + if x != nil { + return x.CreateContainer + } + return nil +} + +func (x *Hooks) GetStartContainer() []*Hook { + if x != nil { + return x.StartContainer + } + return nil +} + +func (x *Hooks) GetPoststart() []*Hook { + if x != nil { + return x.Poststart + } + return nil +} + +func (x *Hooks) GetPoststop() []*Hook { + if x != nil { + return x.Poststop + } + return nil +} + +// One OCI hook. +type Hook struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,3,rep,name=env,proto3" json:"env,omitempty"` + Timeout *OptionalInt `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *Hook) Reset() { + *x = Hook{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Hook) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Hook) ProtoMessage() {} + +func (x *Hook) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Hook.ProtoReflect.Descriptor instead. 
+func (*Hook) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{21} +} + +func (x *Hook) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Hook) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +func (x *Hook) GetEnv() []string { + if x != nil { + return x.Env + } + return nil +} + +func (x *Hook) GetTimeout() *OptionalInt { + if x != nil { + return x.Timeout + } + return nil +} + +// Container (linux) metadata. +type LinuxContainer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespaces []*LinuxNamespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"` + Devices []*LinuxDevice `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"` + Resources *LinuxResources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + OomScoreAdj *OptionalInt `protobuf:"bytes,4,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"` + CgroupsPath string `protobuf:"bytes,5,opt,name=cgroups_path,json=cgroupsPath,proto3" json:"cgroups_path,omitempty"` +} + +func (x *LinuxContainer) Reset() { + *x = LinuxContainer{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxContainer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxContainer) ProtoMessage() {} + +func (x *LinuxContainer) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxContainer.ProtoReflect.Descriptor instead. 
+func (*LinuxContainer) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{22} +} + +func (x *LinuxContainer) GetNamespaces() []*LinuxNamespace { + if x != nil { + return x.Namespaces + } + return nil +} + +func (x *LinuxContainer) GetDevices() []*LinuxDevice { + if x != nil { + return x.Devices + } + return nil +} + +func (x *LinuxContainer) GetResources() *LinuxResources { + if x != nil { + return x.Resources + } + return nil +} + +func (x *LinuxContainer) GetOomScoreAdj() *OptionalInt { + if x != nil { + return x.OomScoreAdj + } + return nil +} + +func (x *LinuxContainer) GetCgroupsPath() string { + if x != nil { + return x.CgroupsPath + } + return "" +} + +// A linux namespace. +type LinuxNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *LinuxNamespace) Reset() { + *x = LinuxNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxNamespace) ProtoMessage() {} + +func (x *LinuxNamespace) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxNamespace.ProtoReflect.Descriptor instead. 
+func (*LinuxNamespace) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{23} +} + +func (x *LinuxNamespace) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *LinuxNamespace) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +// A container (linux) device. +type LinuxDevice struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Major int64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + FileMode *OptionalFileMode `protobuf:"bytes,5,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` + Uid *OptionalUInt32 `protobuf:"bytes,6,opt,name=uid,proto3" json:"uid,omitempty"` + Gid *OptionalUInt32 `protobuf:"bytes,7,opt,name=gid,proto3" json:"gid,omitempty"` +} + +func (x *LinuxDevice) Reset() { + *x = LinuxDevice{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxDevice) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxDevice) ProtoMessage() {} + +func (x *LinuxDevice) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxDevice.ProtoReflect.Descriptor instead. 
+func (*LinuxDevice) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{24} +} + +func (x *LinuxDevice) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *LinuxDevice) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *LinuxDevice) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *LinuxDevice) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *LinuxDevice) GetFileMode() *OptionalFileMode { + if x != nil { + return x.FileMode + } + return nil +} + +func (x *LinuxDevice) GetUid() *OptionalUInt32 { + if x != nil { + return x.Uid + } + return nil +} + +func (x *LinuxDevice) GetGid() *OptionalUInt32 { + if x != nil { + return x.Gid + } + return nil +} + +// A linux device cgroup controller rule. +type LinuxDeviceCgroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Allow bool `protobuf:"varint,1,opt,name=allow,proto3" json:"allow,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Major *OptionalInt64 `protobuf:"bytes,3,opt,name=major,proto3" json:"major,omitempty"` + Minor *OptionalInt64 `protobuf:"bytes,4,opt,name=minor,proto3" json:"minor,omitempty"` + Access string `protobuf:"bytes,5,opt,name=access,proto3" json:"access,omitempty"` +} + +func (x *LinuxDeviceCgroup) Reset() { + *x = LinuxDeviceCgroup{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxDeviceCgroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxDeviceCgroup) ProtoMessage() {} + +func (x *LinuxDeviceCgroup) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxDeviceCgroup.ProtoReflect.Descriptor instead. +func (*LinuxDeviceCgroup) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{25} +} + +func (x *LinuxDeviceCgroup) GetAllow() bool { + if x != nil { + return x.Allow + } + return false +} + +func (x *LinuxDeviceCgroup) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *LinuxDeviceCgroup) GetMajor() *OptionalInt64 { + if x != nil { + return x.Major + } + return nil +} + +func (x *LinuxDeviceCgroup) GetMinor() *OptionalInt64 { + if x != nil { + return x.Minor + } + return nil +} + +func (x *LinuxDeviceCgroup) GetAccess() string { + if x != nil { + return x.Access + } + return "" +} + +// A CDI device reference. +type CDIDevice struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *CDIDevice) Reset() { + *x = CDIDevice{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CDIDevice) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CDIDevice) ProtoMessage() {} + +func (x *CDIDevice) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CDIDevice.ProtoReflect.Descriptor instead. 
+func (*CDIDevice) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{26} +} + +func (x *CDIDevice) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Container (linux) resources. +type LinuxResources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Memory *LinuxMemory `protobuf:"bytes,1,opt,name=memory,proto3" json:"memory,omitempty"` + Cpu *LinuxCPU `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"` + HugepageLimits []*HugepageLimit `protobuf:"bytes,3,rep,name=hugepage_limits,json=hugepageLimits,proto3" json:"hugepage_limits,omitempty"` + BlockioClass *OptionalString `protobuf:"bytes,4,opt,name=blockio_class,json=blockioClass,proto3" json:"blockio_class,omitempty"` + RdtClass *OptionalString `protobuf:"bytes,5,opt,name=rdt_class,json=rdtClass,proto3" json:"rdt_class,omitempty"` + Unified map[string]string `protobuf:"bytes,6,rep,name=unified,proto3" json:"unified,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Devices []*LinuxDeviceCgroup `protobuf:"bytes,7,rep,name=devices,proto3" json:"devices,omitempty"` // for NRI v1 emulation + Pids *LinuxPids `protobuf:"bytes,8,opt,name=pids,proto3" json:"pids,omitempty"` +} + +func (x *LinuxResources) Reset() { + *x = LinuxResources{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxResources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxResources) ProtoMessage() {} + +func (x *LinuxResources) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use LinuxResources.ProtoReflect.Descriptor instead. +func (*LinuxResources) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{27} +} + +func (x *LinuxResources) GetMemory() *LinuxMemory { + if x != nil { + return x.Memory + } + return nil +} + +func (x *LinuxResources) GetCpu() *LinuxCPU { + if x != nil { + return x.Cpu + } + return nil +} + +func (x *LinuxResources) GetHugepageLimits() []*HugepageLimit { + if x != nil { + return x.HugepageLimits + } + return nil +} + +func (x *LinuxResources) GetBlockioClass() *OptionalString { + if x != nil { + return x.BlockioClass + } + return nil +} + +func (x *LinuxResources) GetRdtClass() *OptionalString { + if x != nil { + return x.RdtClass + } + return nil +} + +func (x *LinuxResources) GetUnified() map[string]string { + if x != nil { + return x.Unified + } + return nil +} + +func (x *LinuxResources) GetDevices() []*LinuxDeviceCgroup { + if x != nil { + return x.Devices + } + return nil +} + +func (x *LinuxResources) GetPids() *LinuxPids { + if x != nil { + return x.Pids + } + return nil +} + +// Memory-related parts of (linux) resources. 
+type LinuxMemory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Limit *OptionalInt64 `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + Reservation *OptionalInt64 `protobuf:"bytes,2,opt,name=reservation,proto3" json:"reservation,omitempty"` + Swap *OptionalInt64 `protobuf:"bytes,3,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *OptionalInt64 `protobuf:"bytes,4,opt,name=kernel,proto3" json:"kernel,omitempty"` + KernelTcp *OptionalInt64 `protobuf:"bytes,5,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` + Swappiness *OptionalUInt64 `protobuf:"bytes,6,opt,name=swappiness,proto3" json:"swappiness,omitempty"` + DisableOomKiller *OptionalBool `protobuf:"bytes,7,opt,name=disable_oom_killer,json=disableOomKiller,proto3" json:"disable_oom_killer,omitempty"` + UseHierarchy *OptionalBool `protobuf:"bytes,8,opt,name=use_hierarchy,json=useHierarchy,proto3" json:"use_hierarchy,omitempty"` +} + +func (x *LinuxMemory) Reset() { + *x = LinuxMemory{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxMemory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxMemory) ProtoMessage() {} + +func (x *LinuxMemory) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxMemory.ProtoReflect.Descriptor instead. 
+func (*LinuxMemory) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{28} +} + +func (x *LinuxMemory) GetLimit() *OptionalInt64 { + if x != nil { + return x.Limit + } + return nil +} + +func (x *LinuxMemory) GetReservation() *OptionalInt64 { + if x != nil { + return x.Reservation + } + return nil +} + +func (x *LinuxMemory) GetSwap() *OptionalInt64 { + if x != nil { + return x.Swap + } + return nil +} + +func (x *LinuxMemory) GetKernel() *OptionalInt64 { + if x != nil { + return x.Kernel + } + return nil +} + +func (x *LinuxMemory) GetKernelTcp() *OptionalInt64 { + if x != nil { + return x.KernelTcp + } + return nil +} + +func (x *LinuxMemory) GetSwappiness() *OptionalUInt64 { + if x != nil { + return x.Swappiness + } + return nil +} + +func (x *LinuxMemory) GetDisableOomKiller() *OptionalBool { + if x != nil { + return x.DisableOomKiller + } + return nil +} + +func (x *LinuxMemory) GetUseHierarchy() *OptionalBool { + if x != nil { + return x.UseHierarchy + } + return nil +} + +// CPU-related parts of (linux) resources. 
+type LinuxCPU struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shares *OptionalUInt64 `protobuf:"bytes,1,opt,name=shares,proto3" json:"shares,omitempty"` + Quota *OptionalInt64 `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Period *OptionalUInt64 `protobuf:"bytes,3,opt,name=period,proto3" json:"period,omitempty"` + RealtimeRuntime *OptionalInt64 `protobuf:"bytes,4,opt,name=realtime_runtime,json=realtimeRuntime,proto3" json:"realtime_runtime,omitempty"` + RealtimePeriod *OptionalUInt64 `protobuf:"bytes,5,opt,name=realtime_period,json=realtimePeriod,proto3" json:"realtime_period,omitempty"` + Cpus string `protobuf:"bytes,6,opt,name=cpus,proto3" json:"cpus,omitempty"` + Mems string `protobuf:"bytes,7,opt,name=mems,proto3" json:"mems,omitempty"` +} + +func (x *LinuxCPU) Reset() { + *x = LinuxCPU{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxCPU) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxCPU) ProtoMessage() {} + +func (x *LinuxCPU) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxCPU.ProtoReflect.Descriptor instead. 
+func (*LinuxCPU) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{29} +} + +func (x *LinuxCPU) GetShares() *OptionalUInt64 { + if x != nil { + return x.Shares + } + return nil +} + +func (x *LinuxCPU) GetQuota() *OptionalInt64 { + if x != nil { + return x.Quota + } + return nil +} + +func (x *LinuxCPU) GetPeriod() *OptionalUInt64 { + if x != nil { + return x.Period + } + return nil +} + +func (x *LinuxCPU) GetRealtimeRuntime() *OptionalInt64 { + if x != nil { + return x.RealtimeRuntime + } + return nil +} + +func (x *LinuxCPU) GetRealtimePeriod() *OptionalUInt64 { + if x != nil { + return x.RealtimePeriod + } + return nil +} + +func (x *LinuxCPU) GetCpus() string { + if x != nil { + return x.Cpus + } + return "" +} + +func (x *LinuxCPU) GetMems() string { + if x != nil { + return x.Mems + } + return "" +} + +// Container huge page limit. +type HugepageLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PageSize string `protobuf:"bytes,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *HugepageLimit) Reset() { + *x = HugepageLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HugepageLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HugepageLimit) ProtoMessage() {} + +func (x *HugepageLimit) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HugepageLimit.ProtoReflect.Descriptor instead. 
+func (*HugepageLimit) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{30} +} + +func (x *HugepageLimit) GetPageSize() string { + if x != nil { + return x.PageSize + } + return "" +} + +func (x *HugepageLimit) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +// Container rlimits +type POSIXRlimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Hard uint64 `protobuf:"varint,2,opt,name=hard,proto3" json:"hard,omitempty"` + Soft uint64 `protobuf:"varint,3,opt,name=soft,proto3" json:"soft,omitempty"` +} + +func (x *POSIXRlimit) Reset() { + *x = POSIXRlimit{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *POSIXRlimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*POSIXRlimit) ProtoMessage() {} + +func (x *POSIXRlimit) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use POSIXRlimit.ProtoReflect.Descriptor instead. +func (*POSIXRlimit) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{31} +} + +func (x *POSIXRlimit) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *POSIXRlimit) GetHard() uint64 { + if x != nil { + return x.Hard + } + return 0 +} + +func (x *POSIXRlimit) GetSoft() uint64 { + if x != nil { + return x.Soft + } + return 0 +} + +// Pids-related parts of (linux) resources. 
+type LinuxPids struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *LinuxPids) Reset() { + *x = LinuxPids{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxPids) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxPids) ProtoMessage() {} + +func (x *LinuxPids) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxPids.ProtoReflect.Descriptor instead. +func (*LinuxPids) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{32} +} + +func (x *LinuxPids) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} + +// Requested adjustments to a container being created. 
+type ContainerAdjustment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Mounts []*Mount `protobuf:"bytes,3,rep,name=mounts,proto3" json:"mounts,omitempty"` + Env []*KeyValue `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"` + Hooks *Hooks `protobuf:"bytes,5,opt,name=hooks,proto3" json:"hooks,omitempty"` + Linux *LinuxContainerAdjustment `protobuf:"bytes,6,opt,name=linux,proto3" json:"linux,omitempty"` + Rlimits []*POSIXRlimit `protobuf:"bytes,7,rep,name=rlimits,proto3" json:"rlimits,omitempty"` + CDIDevices []*CDIDevice `protobuf:"bytes,8,rep,name=CDI_devices,json=CDIDevices,proto3" json:"CDI_devices,omitempty"` +} + +func (x *ContainerAdjustment) Reset() { + *x = ContainerAdjustment{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerAdjustment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerAdjustment) ProtoMessage() {} + +func (x *ContainerAdjustment) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerAdjustment.ProtoReflect.Descriptor instead. 
+func (*ContainerAdjustment) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{33} +} + +func (x *ContainerAdjustment) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *ContainerAdjustment) GetMounts() []*Mount { + if x != nil { + return x.Mounts + } + return nil +} + +func (x *ContainerAdjustment) GetEnv() []*KeyValue { + if x != nil { + return x.Env + } + return nil +} + +func (x *ContainerAdjustment) GetHooks() *Hooks { + if x != nil { + return x.Hooks + } + return nil +} + +func (x *ContainerAdjustment) GetLinux() *LinuxContainerAdjustment { + if x != nil { + return x.Linux + } + return nil +} + +func (x *ContainerAdjustment) GetRlimits() []*POSIXRlimit { + if x != nil { + return x.Rlimits + } + return nil +} + +func (x *ContainerAdjustment) GetCDIDevices() []*CDIDevice { + if x != nil { + return x.CDIDevices + } + return nil +} + +// Adjustments to (linux) resources. +type LinuxContainerAdjustment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Devices []*LinuxDevice `protobuf:"bytes,1,rep,name=devices,proto3" json:"devices,omitempty"` + Resources *LinuxResources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + CgroupsPath string `protobuf:"bytes,3,opt,name=cgroups_path,json=cgroupsPath,proto3" json:"cgroups_path,omitempty"` + OomScoreAdj *OptionalInt `protobuf:"bytes,4,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"` +} + +func (x *LinuxContainerAdjustment) Reset() { + *x = LinuxContainerAdjustment{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxContainerAdjustment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxContainerAdjustment) ProtoMessage() {} + +func (x 
*LinuxContainerAdjustment) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxContainerAdjustment.ProtoReflect.Descriptor instead. +func (*LinuxContainerAdjustment) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{34} +} + +func (x *LinuxContainerAdjustment) GetDevices() []*LinuxDevice { + if x != nil { + return x.Devices + } + return nil +} + +func (x *LinuxContainerAdjustment) GetResources() *LinuxResources { + if x != nil { + return x.Resources + } + return nil +} + +func (x *LinuxContainerAdjustment) GetCgroupsPath() string { + if x != nil { + return x.CgroupsPath + } + return "" +} + +func (x *LinuxContainerAdjustment) GetOomScoreAdj() *OptionalInt { + if x != nil { + return x.OomScoreAdj + } + return nil +} + +// Requested update to an already created container. 
+type ContainerUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Linux *LinuxContainerUpdate `protobuf:"bytes,2,opt,name=linux,proto3" json:"linux,omitempty"` + IgnoreFailure bool `protobuf:"varint,3,opt,name=ignore_failure,json=ignoreFailure,proto3" json:"ignore_failure,omitempty"` +} + +func (x *ContainerUpdate) Reset() { + *x = ContainerUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerUpdate) ProtoMessage() {} + +func (x *ContainerUpdate) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerUpdate.ProtoReflect.Descriptor instead. +func (*ContainerUpdate) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{35} +} + +func (x *ContainerUpdate) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} + +func (x *ContainerUpdate) GetLinux() *LinuxContainerUpdate { + if x != nil { + return x.Linux + } + return nil +} + +func (x *ContainerUpdate) GetIgnoreFailure() bool { + if x != nil { + return x.IgnoreFailure + } + return false +} + +// Updates to (linux) resources. 
+type LinuxContainerUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resources *LinuxResources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *LinuxContainerUpdate) Reset() { + *x = LinuxContainerUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinuxContainerUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinuxContainerUpdate) ProtoMessage() {} + +func (x *LinuxContainerUpdate) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinuxContainerUpdate.ProtoReflect.Descriptor instead. +func (*LinuxContainerUpdate) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{36} +} + +func (x *LinuxContainerUpdate) GetResources() *LinuxResources { + if x != nil { + return x.Resources + } + return nil +} + +// Request to evict (IOW unsolicitedly stop) a container. +type ContainerEviction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container to evict. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Human-readable reason for eviction. 
+ Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *ContainerEviction) Reset() { + *x = ContainerEviction{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerEviction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerEviction) ProtoMessage() {} + +func (x *ContainerEviction) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerEviction.ProtoReflect.Descriptor instead. +func (*ContainerEviction) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{37} +} + +func (x *ContainerEviction) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} + +func (x *ContainerEviction) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +// KeyValue represents an environment variable. 
+type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{38} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// An optional string value. 
+type OptionalString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalString) Reset() { + *x = OptionalString{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalString) ProtoMessage() {} + +func (x *OptionalString) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalString.ProtoReflect.Descriptor instead. +func (*OptionalString) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{39} +} + +func (x *OptionalString) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// An optional signed integer value. 
+type OptionalInt struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalInt) Reset() { + *x = OptionalInt{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalInt) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalInt) ProtoMessage() {} + +func (x *OptionalInt) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalInt.ProtoReflect.Descriptor instead. +func (*OptionalInt) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{40} +} + +func (x *OptionalInt) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// An optional 32-bit signed integer value. 
+type OptionalInt32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalInt32) Reset() { + *x = OptionalInt32{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalInt32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalInt32) ProtoMessage() {} + +func (x *OptionalInt32) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalInt32.ProtoReflect.Descriptor instead. +func (*OptionalInt32) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{41} +} + +func (x *OptionalInt32) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// An optional 32-bit unsigned integer value. 
+type OptionalUInt32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalUInt32) Reset() { + *x = OptionalUInt32{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalUInt32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalUInt32) ProtoMessage() {} + +func (x *OptionalUInt32) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalUInt32.ProtoReflect.Descriptor instead. +func (*OptionalUInt32) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{42} +} + +func (x *OptionalUInt32) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// An optional 64-bit signed integer value. 
+type OptionalInt64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalInt64) Reset() { + *x = OptionalInt64{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalInt64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalInt64) ProtoMessage() {} + +func (x *OptionalInt64) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalInt64.ProtoReflect.Descriptor instead. +func (*OptionalInt64) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{43} +} + +func (x *OptionalInt64) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// An optional 64-bit unsigned integer value. 
+type OptionalUInt64 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalUInt64) Reset() { + *x = OptionalUInt64{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalUInt64) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalUInt64) ProtoMessage() {} + +func (x *OptionalUInt64) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalUInt64.ProtoReflect.Descriptor instead. +func (*OptionalUInt64) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{44} +} + +func (x *OptionalUInt64) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// An optional boolean value. 
+type OptionalBool struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalBool) Reset() { + *x = OptionalBool{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalBool) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalBool) ProtoMessage() {} + +func (x *OptionalBool) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalBool.ProtoReflect.Descriptor instead. +func (*OptionalBool) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{45} +} + +func (x *OptionalBool) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// An optional value of file permissions. 
+type OptionalFileMode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OptionalFileMode) Reset() { + *x = OptionalFileMode{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_api_api_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OptionalFileMode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OptionalFileMode) ProtoMessage() {} + +func (x *OptionalFileMode) ProtoReflect() protoreflect.Message { + mi := &file_pkg_api_api_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OptionalFileMode.ProtoReflect.Descriptor instead. +func (*OptionalFileMode) Descriptor() ([]byte, []int) { + return file_pkg_api_api_proto_rawDescGZIP(), []int{46} +} + +func (x *OptionalFileMode) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +var File_pkg_api_api_proto protoreflect.FileDescriptor + +var file_pkg_api_api_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x22, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 
0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, + 0x64, 0x78, 0x22, 0x97, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, + 0x05, 0x65, 0x76, 0x69, 0x63, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6e, + 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x69, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x65, 0x76, 0x69, 0x63, 0x74, 0x22, 0x59, 0x0a, 0x18, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x3c, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, + 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x60, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x15, 0x0a, 0x11, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, + 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x45, 0x56, 0x45, 0x4c, + 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x45, 0x56, 0x45, 0x4c, + 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x45, 0x56, 0x45, 0x4c, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x22, 0xd2, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x13, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x03, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x2b, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x9f, 0x01, 0x0a, 0x12, 0x53, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x52, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x72, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6d, 0x6f, 0x72, 0x65, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x04, 
0x6d, 0x6f, 0x72, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x32, 0x0a, 0x03, 0x70, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x52, 0x03, 0x70, 0x6f, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x41, 0x0a, 0x06, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x61, 0x64, 0x6a, + 0x75, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x05, 0x65, 0x76, 0x69, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 
0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x45, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x65, 0x76, 0x69, 0x63, + 0x74, 0x22, 0xda, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x03, + 0x70, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x03, 0x70, 0x6f, 0x64, + 0x12, 0x3d, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, + 0x4d, 0x0a, 0x0f, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x0e, + 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x97, + 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x72, 0x69, + 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 
0x68, 0x61, + 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x05, 0x65, 0x76, 0x69, + 0x63, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x05, 0x65, 0x76, 0x69, 0x63, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x6f, + 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x32, 0x0a, 0x03, 0x70, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x52, 0x03, 0x70, 0x6f, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x22, 0x56, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, + 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xb8, 0x01, 0x0a, + 0x10, 0x53, 0x74, 0x61, 
0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x31, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x03, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x03, 0x70, 0x6f, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x80, 0x04, 0x0a, 0x0a, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 
0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x75, 0x78, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, + 0x6e, 0x75, 0x78, 0x50, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x75, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 
0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xf7, 0x02, 0x0a, 0x0f, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x50, 0x6f, 0x64, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x47, 0x0a, 0x0c, 0x70, 0x6f, 0x64, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x52, 0x0b, 0x70, 0x6f, 0x64, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, + 0x12, 0x49, 0x0a, 0x0d, 0x70, 0x6f, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x0c, 0x70, + 0x6f, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x44, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, + 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0xbe, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x70, + 0x6f, 0x64, 0x5f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x6f, 0x64, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, + 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x61, 0x6e, 
0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, + 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, + 0x12, 0x33, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x68, 0x6f, 0x6f, 0x6b, 0x73, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, + 0x73, 0x52, 0x05, 0x68, 0x6f, 0x6f, 0x6b, 0x73, 0x12, 0x3a, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x75, + 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x75, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 
0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x4f, 0x53, 0x49, 0x58, 0x52, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x07, 0x72, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, + 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6f, + 0x0a, 0x05, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x80, 0x03, 0x0a, 0x05, 0x48, 0x6f, 0x6f, 0x6b, 0x73, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 
0x08, 0x70, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x0f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, + 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x74, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, + 0x09, 0x70, 0x6f, 0x73, 0x74, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6f, + 0x73, 0x74, 0x73, 0x74, 0x6f, 0x70, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, + 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x74, 
0x73, 0x74, + 0x6f, 0x70, 0x22, 0x7d, 0x0a, 0x04, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, + 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x6e, 0x76, 0x12, 0x3b, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x22, 0xc1, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x64, 0x65, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, + 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, + 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x4c, 0x69, 
0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x6f, + 0x6f, 0x6d, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x61, 0x64, 0x6a, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x52, 0x0b, 0x6f, 0x6f, 0x6d, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x41, + 0x64, 0x6a, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x50, 0x61, 0x74, 0x68, 0x22, 0x38, 0x0a, 0x0e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, + 0x96, 0x02, 0x0a, 0x0b, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, + 0x6e, 0x6f, 0x72, 0x12, 0x43, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 
0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x08, + 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x36, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x03, 0x75, 0x69, 0x64, + 0x12, 0x36, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x52, 0x03, 0x67, 0x69, 0x64, 0x22, 0xcb, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x6e, + 0x75, 0x78, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x14, + 0x0a, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x05, 0x6d, 0x61, + 0x6a, 0x6f, 0x72, 0x12, 0x39, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x16, + 
0x0a, 0x06, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x1f, 0x0a, 0x09, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xda, 0x04, 0x0a, 0x0e, 0x4c, 0x69, 0x6e, 0x75, + 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x06, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, + 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, + 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x4c, 0x0a, 0x0f, 0x68, 0x75, 0x67, 0x65, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x75, 0x67, 0x65, 0x70, 0x61, 0x67, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0e, 0x68, 0x75, 0x67, 0x65, 0x70, 0x61, 0x67, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x69, 0x6f, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, + 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x52, 0x0c, 0x62, 0x6c, 
0x6f, 0x63, 0x6b, 0x69, 0x6f, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x41, 0x0a, 0x09, 0x72, 0x64, 0x74, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x72, 0x64, 0x74, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, + 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x75, 0x6e, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x12, 0x41, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x65, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x07, 0x64, 0x65, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x04, 0x70, 0x69, 0x64, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x50, 0x69, + 0x64, 0x73, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x55, 0x6e, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xaa, 0x04, 0x0a, 0x0b, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x04, 0x73, 0x77, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x04, 0x73, 0x77, 0x61, 0x70, 0x12, + 0x3b, 0x0a, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x42, 0x0a, 0x0a, + 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x49, 0x6e, 
0x74, 0x36, 0x34, 0x52, 0x09, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x63, 0x70, + 0x12, 0x44, 0x0a, 0x0a, 0x73, 0x77, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x0a, 0x73, 0x77, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4f, + 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x5f, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, + 0x6f, 0x6f, 0x6c, 0x52, 0x0c, 0x75, 0x73, 0x65, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, + 0x79, 0x22, 0x88, 0x03, 0x0a, 0x08, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, 0x50, 0x55, 0x12, 0x3c, + 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x05, + 0x71, 0x75, 0x6f, 0x74, 0x61, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x52, 0x05, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x0e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x70, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x63, 0x70, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0d, + 0x48, 0x75, 0x67, 0x65, 0x70, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 
0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x4f, 0x53, 0x49, 0x58, 0x52, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6f, 0x66, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6f, 0x66, 0x74, 0x22, 0x21, 0x0a, 0x09, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x50, 0x69, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x92, + 0x04, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x6a, 0x75, + 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x6a, 0x75, + 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x06, 0x6d, 
0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x03, 0x65, 0x6e, 0x76, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x31, 0x0a, 0x05, 0x68, + 0x6f, 0x6f, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x72, 0x69, + 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x48, 0x6f, 0x6f, 0x6b, 0x73, 0x52, 0x05, 0x68, 0x6f, 0x6f, 0x6b, 0x73, 0x12, 0x44, + 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x75, 0x78, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x4f, 0x53, + 0x49, 0x58, 0x52, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x07, 0x72, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x43, 0x44, 0x49, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x44, + 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0a, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x85, 0x02, 0x0a, 0x18, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x3b, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x65, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x42, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x45, 0x0a, 0x0d, 0x6f, 0x6f, 0x6d, 0x5f, 0x73, 0x63, 0x6f, 0x72, + 0x65, 0x5f, 0x61, 0x64, 0x6a, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x52, 0x0b, + 0x6f, 0x6f, 0x6d, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x41, 0x64, 0x6a, 0x22, 0x9d, 0x01, 0x0a, 0x0f, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 
0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x40, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x75, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x66, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x22, 0x5a, 0x0a, 0x14, 0x4c, + 0x69, 0x6e, 0x75, 0x78, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, + 0x6e, 0x75, 0x78, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x4e, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x26, 0x0a, 0x0e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, + 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x26, 0x0a, 0x0e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x26, + 0x0a, 0x0e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x24, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x42, 0x6f, 0x6f, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x28, 0x0a, 0x10, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x46, 0x69, 0x6c, 0x65, 
0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, 0x9c, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x52, 0x55, 0x4e, 0x5f, 0x50, 0x4f, 0x44, 0x5f, 0x53, 0x41, 0x4e, 0x44, 0x42, 0x4f, 0x58, + 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x4f, 0x50, 0x5f, 0x50, 0x4f, 0x44, 0x5f, 0x53, + 0x41, 0x4e, 0x44, 0x42, 0x4f, 0x58, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x4d, 0x4f, + 0x56, 0x45, 0x5f, 0x50, 0x4f, 0x44, 0x5f, 0x53, 0x41, 0x4e, 0x44, 0x42, 0x4f, 0x58, 0x10, 0x03, + 0x12, 0x14, 0x0a, 0x10, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x10, 0x04, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x10, + 0x05, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x10, 0x07, + 0x12, 0x14, 0x0a, 0x10, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x10, 0x08, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x55, + 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x10, + 0x09, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x4f, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, + 0x4e, 0x45, 0x52, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x5f, + 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x4c, + 0x41, 0x53, 0x54, 0x10, 0x0c, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 
0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x54, + 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, + 0x4e, 0x45, 0x52, 0x5f, 0x50, 0x41, 0x55, 0x53, 0x45, 0x44, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, + 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, + 0x47, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, + 0x5f, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x04, 0x32, 0xd8, 0x01, 0x0a, 0x07, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x5a, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x2b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x71, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 
0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xae, 0x05, 0x0a, 0x06, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x12, 0x5c, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x26, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, + 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x2e, + 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x1b, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x6e, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x2c, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 
0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x2c, 0x2e, 0x6e, 0x72, + 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x0d, 0x53, 0x74, 0x6f, 0x70, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x26, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 
0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x57, 0x0a, 0x0d, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x20, + 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x6e, 0x72, 0x69, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, + 0x27, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x6e, 0x72, 0x69, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_api_api_proto_rawDescOnce sync.Once + file_pkg_api_api_proto_rawDescData = file_pkg_api_api_proto_rawDesc +) + +func file_pkg_api_api_proto_rawDescGZIP() []byte { + file_pkg_api_api_proto_rawDescOnce.Do(func() { + file_pkg_api_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_api_api_proto_rawDescData) + }) + return file_pkg_api_api_proto_rawDescData +} + +var file_pkg_api_api_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_pkg_api_api_proto_msgTypes = make([]protoimpl.MessageInfo, 53) +var file_pkg_api_api_proto_goTypes = []interface{}{ + (Event)(0), // 0: nri.pkg.api.v1alpha1.Event + (ContainerState)(0), // 1: nri.pkg.api.v1alpha1.ContainerState + (LogRequest_Level)(0), // 2: nri.pkg.api.v1alpha1.LogRequest.Level + (*RegisterPluginRequest)(nil), // 3: nri.pkg.api.v1alpha1.RegisterPluginRequest + 
(*UpdateContainersRequest)(nil), // 4: nri.pkg.api.v1alpha1.UpdateContainersRequest + (*UpdateContainersResponse)(nil), // 5: nri.pkg.api.v1alpha1.UpdateContainersResponse + (*LogRequest)(nil), // 6: nri.pkg.api.v1alpha1.LogRequest + (*ConfigureRequest)(nil), // 7: nri.pkg.api.v1alpha1.ConfigureRequest + (*ConfigureResponse)(nil), // 8: nri.pkg.api.v1alpha1.ConfigureResponse + (*SynchronizeRequest)(nil), // 9: nri.pkg.api.v1alpha1.SynchronizeRequest + (*SynchronizeResponse)(nil), // 10: nri.pkg.api.v1alpha1.SynchronizeResponse + (*CreateContainerRequest)(nil), // 11: nri.pkg.api.v1alpha1.CreateContainerRequest + (*CreateContainerResponse)(nil), // 12: nri.pkg.api.v1alpha1.CreateContainerResponse + (*UpdateContainerRequest)(nil), // 13: nri.pkg.api.v1alpha1.UpdateContainerRequest + (*UpdateContainerResponse)(nil), // 14: nri.pkg.api.v1alpha1.UpdateContainerResponse + (*StopContainerRequest)(nil), // 15: nri.pkg.api.v1alpha1.StopContainerRequest + (*StopContainerResponse)(nil), // 16: nri.pkg.api.v1alpha1.StopContainerResponse + (*StateChangeEvent)(nil), // 17: nri.pkg.api.v1alpha1.StateChangeEvent + (*Empty)(nil), // 18: nri.pkg.api.v1alpha1.Empty + (*PodSandbox)(nil), // 19: nri.pkg.api.v1alpha1.PodSandbox + (*LinuxPodSandbox)(nil), // 20: nri.pkg.api.v1alpha1.LinuxPodSandbox + (*Container)(nil), // 21: nri.pkg.api.v1alpha1.Container + (*Mount)(nil), // 22: nri.pkg.api.v1alpha1.Mount + (*Hooks)(nil), // 23: nri.pkg.api.v1alpha1.Hooks + (*Hook)(nil), // 24: nri.pkg.api.v1alpha1.Hook + (*LinuxContainer)(nil), // 25: nri.pkg.api.v1alpha1.LinuxContainer + (*LinuxNamespace)(nil), // 26: nri.pkg.api.v1alpha1.LinuxNamespace + (*LinuxDevice)(nil), // 27: nri.pkg.api.v1alpha1.LinuxDevice + (*LinuxDeviceCgroup)(nil), // 28: nri.pkg.api.v1alpha1.LinuxDeviceCgroup + (*CDIDevice)(nil), // 29: nri.pkg.api.v1alpha1.CDIDevice + (*LinuxResources)(nil), // 30: nri.pkg.api.v1alpha1.LinuxResources + (*LinuxMemory)(nil), // 31: nri.pkg.api.v1alpha1.LinuxMemory + (*LinuxCPU)(nil), // 
32: nri.pkg.api.v1alpha1.LinuxCPU + (*HugepageLimit)(nil), // 33: nri.pkg.api.v1alpha1.HugepageLimit + (*POSIXRlimit)(nil), // 34: nri.pkg.api.v1alpha1.POSIXRlimit + (*LinuxPids)(nil), // 35: nri.pkg.api.v1alpha1.LinuxPids + (*ContainerAdjustment)(nil), // 36: nri.pkg.api.v1alpha1.ContainerAdjustment + (*LinuxContainerAdjustment)(nil), // 37: nri.pkg.api.v1alpha1.LinuxContainerAdjustment + (*ContainerUpdate)(nil), // 38: nri.pkg.api.v1alpha1.ContainerUpdate + (*LinuxContainerUpdate)(nil), // 39: nri.pkg.api.v1alpha1.LinuxContainerUpdate + (*ContainerEviction)(nil), // 40: nri.pkg.api.v1alpha1.ContainerEviction + (*KeyValue)(nil), // 41: nri.pkg.api.v1alpha1.KeyValue + (*OptionalString)(nil), // 42: nri.pkg.api.v1alpha1.OptionalString + (*OptionalInt)(nil), // 43: nri.pkg.api.v1alpha1.OptionalInt + (*OptionalInt32)(nil), // 44: nri.pkg.api.v1alpha1.OptionalInt32 + (*OptionalUInt32)(nil), // 45: nri.pkg.api.v1alpha1.OptionalUInt32 + (*OptionalInt64)(nil), // 46: nri.pkg.api.v1alpha1.OptionalInt64 + (*OptionalUInt64)(nil), // 47: nri.pkg.api.v1alpha1.OptionalUInt64 + (*OptionalBool)(nil), // 48: nri.pkg.api.v1alpha1.OptionalBool + (*OptionalFileMode)(nil), // 49: nri.pkg.api.v1alpha1.OptionalFileMode + nil, // 50: nri.pkg.api.v1alpha1.PodSandbox.LabelsEntry + nil, // 51: nri.pkg.api.v1alpha1.PodSandbox.AnnotationsEntry + nil, // 52: nri.pkg.api.v1alpha1.Container.LabelsEntry + nil, // 53: nri.pkg.api.v1alpha1.Container.AnnotationsEntry + nil, // 54: nri.pkg.api.v1alpha1.LinuxResources.UnifiedEntry + nil, // 55: nri.pkg.api.v1alpha1.ContainerAdjustment.AnnotationsEntry +} +var file_pkg_api_api_proto_depIdxs = []int32{ + 38, // 0: nri.pkg.api.v1alpha1.UpdateContainersRequest.update:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 40, // 1: nri.pkg.api.v1alpha1.UpdateContainersRequest.evict:type_name -> nri.pkg.api.v1alpha1.ContainerEviction + 38, // 2: nri.pkg.api.v1alpha1.UpdateContainersResponse.failed:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 2, // 3: 
nri.pkg.api.v1alpha1.LogRequest.level:type_name -> nri.pkg.api.v1alpha1.LogRequest.Level + 19, // 4: nri.pkg.api.v1alpha1.SynchronizeRequest.pods:type_name -> nri.pkg.api.v1alpha1.PodSandbox + 21, // 5: nri.pkg.api.v1alpha1.SynchronizeRequest.containers:type_name -> nri.pkg.api.v1alpha1.Container + 38, // 6: nri.pkg.api.v1alpha1.SynchronizeResponse.update:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 19, // 7: nri.pkg.api.v1alpha1.CreateContainerRequest.pod:type_name -> nri.pkg.api.v1alpha1.PodSandbox + 21, // 8: nri.pkg.api.v1alpha1.CreateContainerRequest.container:type_name -> nri.pkg.api.v1alpha1.Container + 36, // 9: nri.pkg.api.v1alpha1.CreateContainerResponse.adjust:type_name -> nri.pkg.api.v1alpha1.ContainerAdjustment + 38, // 10: nri.pkg.api.v1alpha1.CreateContainerResponse.update:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 40, // 11: nri.pkg.api.v1alpha1.CreateContainerResponse.evict:type_name -> nri.pkg.api.v1alpha1.ContainerEviction + 19, // 12: nri.pkg.api.v1alpha1.UpdateContainerRequest.pod:type_name -> nri.pkg.api.v1alpha1.PodSandbox + 21, // 13: nri.pkg.api.v1alpha1.UpdateContainerRequest.container:type_name -> nri.pkg.api.v1alpha1.Container + 30, // 14: nri.pkg.api.v1alpha1.UpdateContainerRequest.linux_resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 38, // 15: nri.pkg.api.v1alpha1.UpdateContainerResponse.update:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 40, // 16: nri.pkg.api.v1alpha1.UpdateContainerResponse.evict:type_name -> nri.pkg.api.v1alpha1.ContainerEviction + 19, // 17: nri.pkg.api.v1alpha1.StopContainerRequest.pod:type_name -> nri.pkg.api.v1alpha1.PodSandbox + 21, // 18: nri.pkg.api.v1alpha1.StopContainerRequest.container:type_name -> nri.pkg.api.v1alpha1.Container + 38, // 19: nri.pkg.api.v1alpha1.StopContainerResponse.update:type_name -> nri.pkg.api.v1alpha1.ContainerUpdate + 0, // 20: nri.pkg.api.v1alpha1.StateChangeEvent.event:type_name -> nri.pkg.api.v1alpha1.Event + 19, // 21: 
nri.pkg.api.v1alpha1.StateChangeEvent.pod:type_name -> nri.pkg.api.v1alpha1.PodSandbox + 21, // 22: nri.pkg.api.v1alpha1.StateChangeEvent.container:type_name -> nri.pkg.api.v1alpha1.Container + 50, // 23: nri.pkg.api.v1alpha1.PodSandbox.labels:type_name -> nri.pkg.api.v1alpha1.PodSandbox.LabelsEntry + 51, // 24: nri.pkg.api.v1alpha1.PodSandbox.annotations:type_name -> nri.pkg.api.v1alpha1.PodSandbox.AnnotationsEntry + 20, // 25: nri.pkg.api.v1alpha1.PodSandbox.linux:type_name -> nri.pkg.api.v1alpha1.LinuxPodSandbox + 30, // 26: nri.pkg.api.v1alpha1.LinuxPodSandbox.pod_overhead:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 30, // 27: nri.pkg.api.v1alpha1.LinuxPodSandbox.pod_resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 26, // 28: nri.pkg.api.v1alpha1.LinuxPodSandbox.namespaces:type_name -> nri.pkg.api.v1alpha1.LinuxNamespace + 30, // 29: nri.pkg.api.v1alpha1.LinuxPodSandbox.resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 1, // 30: nri.pkg.api.v1alpha1.Container.state:type_name -> nri.pkg.api.v1alpha1.ContainerState + 52, // 31: nri.pkg.api.v1alpha1.Container.labels:type_name -> nri.pkg.api.v1alpha1.Container.LabelsEntry + 53, // 32: nri.pkg.api.v1alpha1.Container.annotations:type_name -> nri.pkg.api.v1alpha1.Container.AnnotationsEntry + 22, // 33: nri.pkg.api.v1alpha1.Container.mounts:type_name -> nri.pkg.api.v1alpha1.Mount + 23, // 34: nri.pkg.api.v1alpha1.Container.hooks:type_name -> nri.pkg.api.v1alpha1.Hooks + 25, // 35: nri.pkg.api.v1alpha1.Container.linux:type_name -> nri.pkg.api.v1alpha1.LinuxContainer + 34, // 36: nri.pkg.api.v1alpha1.Container.rlimits:type_name -> nri.pkg.api.v1alpha1.POSIXRlimit + 24, // 37: nri.pkg.api.v1alpha1.Hooks.prestart:type_name -> nri.pkg.api.v1alpha1.Hook + 24, // 38: nri.pkg.api.v1alpha1.Hooks.create_runtime:type_name -> nri.pkg.api.v1alpha1.Hook + 24, // 39: nri.pkg.api.v1alpha1.Hooks.create_container:type_name -> nri.pkg.api.v1alpha1.Hook + 24, // 40: 
nri.pkg.api.v1alpha1.Hooks.start_container:type_name -> nri.pkg.api.v1alpha1.Hook + 24, // 41: nri.pkg.api.v1alpha1.Hooks.poststart:type_name -> nri.pkg.api.v1alpha1.Hook + 24, // 42: nri.pkg.api.v1alpha1.Hooks.poststop:type_name -> nri.pkg.api.v1alpha1.Hook + 43, // 43: nri.pkg.api.v1alpha1.Hook.timeout:type_name -> nri.pkg.api.v1alpha1.OptionalInt + 26, // 44: nri.pkg.api.v1alpha1.LinuxContainer.namespaces:type_name -> nri.pkg.api.v1alpha1.LinuxNamespace + 27, // 45: nri.pkg.api.v1alpha1.LinuxContainer.devices:type_name -> nri.pkg.api.v1alpha1.LinuxDevice + 30, // 46: nri.pkg.api.v1alpha1.LinuxContainer.resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 43, // 47: nri.pkg.api.v1alpha1.LinuxContainer.oom_score_adj:type_name -> nri.pkg.api.v1alpha1.OptionalInt + 49, // 48: nri.pkg.api.v1alpha1.LinuxDevice.file_mode:type_name -> nri.pkg.api.v1alpha1.OptionalFileMode + 45, // 49: nri.pkg.api.v1alpha1.LinuxDevice.uid:type_name -> nri.pkg.api.v1alpha1.OptionalUInt32 + 45, // 50: nri.pkg.api.v1alpha1.LinuxDevice.gid:type_name -> nri.pkg.api.v1alpha1.OptionalUInt32 + 46, // 51: nri.pkg.api.v1alpha1.LinuxDeviceCgroup.major:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 46, // 52: nri.pkg.api.v1alpha1.LinuxDeviceCgroup.minor:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 31, // 53: nri.pkg.api.v1alpha1.LinuxResources.memory:type_name -> nri.pkg.api.v1alpha1.LinuxMemory + 32, // 54: nri.pkg.api.v1alpha1.LinuxResources.cpu:type_name -> nri.pkg.api.v1alpha1.LinuxCPU + 33, // 55: nri.pkg.api.v1alpha1.LinuxResources.hugepage_limits:type_name -> nri.pkg.api.v1alpha1.HugepageLimit + 42, // 56: nri.pkg.api.v1alpha1.LinuxResources.blockio_class:type_name -> nri.pkg.api.v1alpha1.OptionalString + 42, // 57: nri.pkg.api.v1alpha1.LinuxResources.rdt_class:type_name -> nri.pkg.api.v1alpha1.OptionalString + 54, // 58: nri.pkg.api.v1alpha1.LinuxResources.unified:type_name -> nri.pkg.api.v1alpha1.LinuxResources.UnifiedEntry + 28, // 59: 
nri.pkg.api.v1alpha1.LinuxResources.devices:type_name -> nri.pkg.api.v1alpha1.LinuxDeviceCgroup + 35, // 60: nri.pkg.api.v1alpha1.LinuxResources.pids:type_name -> nri.pkg.api.v1alpha1.LinuxPids + 46, // 61: nri.pkg.api.v1alpha1.LinuxMemory.limit:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 46, // 62: nri.pkg.api.v1alpha1.LinuxMemory.reservation:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 46, // 63: nri.pkg.api.v1alpha1.LinuxMemory.swap:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 46, // 64: nri.pkg.api.v1alpha1.LinuxMemory.kernel:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 46, // 65: nri.pkg.api.v1alpha1.LinuxMemory.kernel_tcp:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 47, // 66: nri.pkg.api.v1alpha1.LinuxMemory.swappiness:type_name -> nri.pkg.api.v1alpha1.OptionalUInt64 + 48, // 67: nri.pkg.api.v1alpha1.LinuxMemory.disable_oom_killer:type_name -> nri.pkg.api.v1alpha1.OptionalBool + 48, // 68: nri.pkg.api.v1alpha1.LinuxMemory.use_hierarchy:type_name -> nri.pkg.api.v1alpha1.OptionalBool + 47, // 69: nri.pkg.api.v1alpha1.LinuxCPU.shares:type_name -> nri.pkg.api.v1alpha1.OptionalUInt64 + 46, // 70: nri.pkg.api.v1alpha1.LinuxCPU.quota:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 47, // 71: nri.pkg.api.v1alpha1.LinuxCPU.period:type_name -> nri.pkg.api.v1alpha1.OptionalUInt64 + 46, // 72: nri.pkg.api.v1alpha1.LinuxCPU.realtime_runtime:type_name -> nri.pkg.api.v1alpha1.OptionalInt64 + 47, // 73: nri.pkg.api.v1alpha1.LinuxCPU.realtime_period:type_name -> nri.pkg.api.v1alpha1.OptionalUInt64 + 55, // 74: nri.pkg.api.v1alpha1.ContainerAdjustment.annotations:type_name -> nri.pkg.api.v1alpha1.ContainerAdjustment.AnnotationsEntry + 22, // 75: nri.pkg.api.v1alpha1.ContainerAdjustment.mounts:type_name -> nri.pkg.api.v1alpha1.Mount + 41, // 76: nri.pkg.api.v1alpha1.ContainerAdjustment.env:type_name -> nri.pkg.api.v1alpha1.KeyValue + 23, // 77: nri.pkg.api.v1alpha1.ContainerAdjustment.hooks:type_name -> nri.pkg.api.v1alpha1.Hooks + 37, // 78: 
nri.pkg.api.v1alpha1.ContainerAdjustment.linux:type_name -> nri.pkg.api.v1alpha1.LinuxContainerAdjustment + 34, // 79: nri.pkg.api.v1alpha1.ContainerAdjustment.rlimits:type_name -> nri.pkg.api.v1alpha1.POSIXRlimit + 29, // 80: nri.pkg.api.v1alpha1.ContainerAdjustment.CDI_devices:type_name -> nri.pkg.api.v1alpha1.CDIDevice + 27, // 81: nri.pkg.api.v1alpha1.LinuxContainerAdjustment.devices:type_name -> nri.pkg.api.v1alpha1.LinuxDevice + 30, // 82: nri.pkg.api.v1alpha1.LinuxContainerAdjustment.resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 43, // 83: nri.pkg.api.v1alpha1.LinuxContainerAdjustment.oom_score_adj:type_name -> nri.pkg.api.v1alpha1.OptionalInt + 39, // 84: nri.pkg.api.v1alpha1.ContainerUpdate.linux:type_name -> nri.pkg.api.v1alpha1.LinuxContainerUpdate + 30, // 85: nri.pkg.api.v1alpha1.LinuxContainerUpdate.resources:type_name -> nri.pkg.api.v1alpha1.LinuxResources + 3, // 86: nri.pkg.api.v1alpha1.Runtime.RegisterPlugin:input_type -> nri.pkg.api.v1alpha1.RegisterPluginRequest + 4, // 87: nri.pkg.api.v1alpha1.Runtime.UpdateContainers:input_type -> nri.pkg.api.v1alpha1.UpdateContainersRequest + 7, // 88: nri.pkg.api.v1alpha1.Plugin.Configure:input_type -> nri.pkg.api.v1alpha1.ConfigureRequest + 9, // 89: nri.pkg.api.v1alpha1.Plugin.Synchronize:input_type -> nri.pkg.api.v1alpha1.SynchronizeRequest + 18, // 90: nri.pkg.api.v1alpha1.Plugin.Shutdown:input_type -> nri.pkg.api.v1alpha1.Empty + 11, // 91: nri.pkg.api.v1alpha1.Plugin.CreateContainer:input_type -> nri.pkg.api.v1alpha1.CreateContainerRequest + 13, // 92: nri.pkg.api.v1alpha1.Plugin.UpdateContainer:input_type -> nri.pkg.api.v1alpha1.UpdateContainerRequest + 15, // 93: nri.pkg.api.v1alpha1.Plugin.StopContainer:input_type -> nri.pkg.api.v1alpha1.StopContainerRequest + 17, // 94: nri.pkg.api.v1alpha1.Plugin.StateChange:input_type -> nri.pkg.api.v1alpha1.StateChangeEvent + 6, // 95: nri.pkg.api.v1alpha1.HostFunctions.Log:input_type -> nri.pkg.api.v1alpha1.LogRequest + 18, // 96: 
nri.pkg.api.v1alpha1.Runtime.RegisterPlugin:output_type -> nri.pkg.api.v1alpha1.Empty + 5, // 97: nri.pkg.api.v1alpha1.Runtime.UpdateContainers:output_type -> nri.pkg.api.v1alpha1.UpdateContainersResponse + 8, // 98: nri.pkg.api.v1alpha1.Plugin.Configure:output_type -> nri.pkg.api.v1alpha1.ConfigureResponse + 10, // 99: nri.pkg.api.v1alpha1.Plugin.Synchronize:output_type -> nri.pkg.api.v1alpha1.SynchronizeResponse + 18, // 100: nri.pkg.api.v1alpha1.Plugin.Shutdown:output_type -> nri.pkg.api.v1alpha1.Empty + 12, // 101: nri.pkg.api.v1alpha1.Plugin.CreateContainer:output_type -> nri.pkg.api.v1alpha1.CreateContainerResponse + 14, // 102: nri.pkg.api.v1alpha1.Plugin.UpdateContainer:output_type -> nri.pkg.api.v1alpha1.UpdateContainerResponse + 16, // 103: nri.pkg.api.v1alpha1.Plugin.StopContainer:output_type -> nri.pkg.api.v1alpha1.StopContainerResponse + 18, // 104: nri.pkg.api.v1alpha1.Plugin.StateChange:output_type -> nri.pkg.api.v1alpha1.Empty + 18, // 105: nri.pkg.api.v1alpha1.HostFunctions.Log:output_type -> nri.pkg.api.v1alpha1.Empty + 96, // [96:106] is the sub-list for method output_type + 86, // [86:96] is the sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name +} + +func init() { file_pkg_api_api_proto_init() } +func file_pkg_api_api_proto_init() { + if File_pkg_api_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_api_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegisterPluginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SynchronizeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SynchronizeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateContainerResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopContainerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateChangeEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PodSandbox); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[17].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*LinuxPodSandbox); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Hooks); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Hook); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxContainer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxDevice); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_api_api_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxDeviceCgroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CDIDevice); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxResources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxMemory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxCPU); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HugepageLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*POSIXRlimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxPids); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerAdjustment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxContainerAdjustment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinuxContainerUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerEviction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalInt); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalInt32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalUInt32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalInt64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalUInt64); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalBool); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_api_api_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OptionalFileMode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_api_api_proto_rawDesc, + NumEnums: 3, + NumMessages: 53, + NumExtensions: 0, + NumServices: 3, + }, + GoTypes: file_pkg_api_api_proto_goTypes, + DependencyIndexes: 
file_pkg_api_api_proto_depIdxs, + EnumInfos: file_pkg_api_api_proto_enumTypes, + MessageInfos: file_pkg_api_api_proto_msgTypes, + }.Build() + File_pkg_api_api_proto = out.File + file_pkg_api_api_proto_rawDesc = nil + file_pkg_api_api_proto_goTypes = nil + file_pkg_api_api_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api.proto b/vendor/github.com/containerd/nri/pkg/api/api.proto new file mode 100644 index 0000000000..1c56e7ad2e --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api.proto @@ -0,0 +1,492 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package nri.pkg.api.v1alpha1; + +option go_package = "github.com/containerd/nri/pkg/api;api"; + +// Runtime service is the public API runtimes expose for NRI plugins. +// On this interface RPC requests are initiated by the plugin. This +// only covers plugin registration and unsolicited container updates. +// The rest of the API is defined by the Plugin service. +service Runtime { + // RegisterPlugin registers the plugin with the runtime. + rpc RegisterPlugin(RegisterPluginRequest) returns (Empty); + // UpdateContainers requests unsolicited updates to a set of containers. + rpc UpdateContainers(UpdateContainersRequest) returns (UpdateContainersResponse); +} + +message RegisterPluginRequest { + // Name of the plugin to register. + string plugin_name = 1; + // Plugin invocation index. 
Plugins are called in ascending index order. + string plugin_idx = 2; +} + +message UpdateContainersRequest { + // List of containers to update. + repeated ContainerUpdate update = 1; + // List of containers to evict. + repeated ContainerEviction evict = 2; +} + +message UpdateContainersResponse { + // Containers that the runtime failed to update. + repeated ContainerUpdate failed = 1; +} + + +// +// Plugin is the API NRI uses to interact with plugins. It is used to +// - configure a plugin and subscribe it for lifecycle events +// - synchronize the state of a plugin with that of the runtime +// - hook a plugin into the lifecycle events of its interest +// +// During configuration the plugin tells the runtime which lifecycle events +// it wishes to get hooked into. Once configured, the plugin is synchronized +// with the runtime by receiving the list of pods and containers known to +// the runtime. The plugin can request changes to any of the containers in +// response. After initial synchronization the plugin starts receiving the +// events it subscribed for as they occur in the runtime. For container +// creation, update, and stop events, the plugin can request changes, both +// to the container that triggered the event or any other existing container +// in the runtime. +// +// For a subset of the container lifecycle events, NRI defines an additional +// Post-variant of the event. These variants are defined for CreateContainer, +// StartContainer, and UpdateContainer. For creation and update, these events +// can be used by plugins to discover the full extent of changes applied to +// the container, including any changes made by other active plugins. +// +// go:plugin type=plugin version=1 +service Plugin { + // Configure the plugin and get its event subscription. + rpc Configure(ConfigureRequest) returns (ConfigureResponse); + + // Synchronize the plugin with the state of the runtime. 
+ rpc Synchronize(SynchronizeRequest) returns (SynchronizeResponse); + + // Shutdown a plugin (let it know the runtime is going down). + rpc Shutdown(Empty) returns (Empty); + + // CreateContainer relays the corresponding request to the plugin. In + // response, the plugin can adjust the container being created, and + // update other containers in the runtime. Container adjustment can + // alter labels, annotations, mounts, devices, environment variables, + // OCI hooks, and assigned container resources. Updates can alter + // assigned container resources. + rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse); + + // UpdateContainer relays the corresponding request to the plugin. + // The plugin can alter how the container is updated and request updates + // to additional containers in the runtime. + rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse); + + // StopContainer relays the corresponding request to the plugin. The plugin + // can update any of the remaining containers in the runtime in response. + rpc StopContainer(StopContainerRequest) returns (StopContainerResponse); + + // StateChange relays any remaining pod or container lifecycle/state change + // events the plugin has subscribed for. These can be used to trigger any + // plugin-specific processing which needs to occur in connection with any of + // these events. + rpc StateChange(StateChangeEvent) returns (Empty); +} + +// go:plugin type=host +service HostFunctions { + // Log displays a log message + rpc Log(LogRequest) returns (Empty) {} +} + +message LogRequest { + string msg = 1; + + enum Level { + LEVEL_UNSPECIFIED = 0; + LEVEL_DEBUG = 1; + LEVEL_INFO = 2; + LEVEL_WARN = 3; + LEVEL_ERROR = 4; + } + Level level = 2; +} + +message ConfigureRequest { + // Any plugin-specific data, if present among the NRI configuration. + string config = 1; + // Name of the runtime NRI is running in. 
+ string runtime_name = 2; + // Version of the runtime NRI is running in. + string runtime_version = 3; + // Configured registration timeout in milliseconds. + int64 registration_timeout = 4; + // Configured request processing timeout in milliseconds. + int64 request_timeout = 5; +} + +message ConfigureResponse { + // Events to subscribe the plugin for. Each bit set corresponds to an + // enumerated Event. + int32 events = 2; +} + +message SynchronizeRequest { + // Pods known to the runtime. + repeated PodSandbox pods = 1; + // Containers known to the runtime. + repeated Container containers = 2; + // Whether there are more pods and containers to follow. + bool more = 3; +} + +message SynchronizeResponse { + // Updates to containers requested by the plugin. + repeated ContainerUpdate update = 1; + // Whether the client is able to handle more advertised pods and containers. + bool more = 2; +} + +message CreateContainerRequest { + // Pod of container being created. + PodSandbox pod = 1; + // Container being created. + Container container = 2; +} + +message CreateContainerResponse { + // Requested adjustments to container being created. + ContainerAdjustment adjust = 1; + // Requested updates to other existing containers. + repeated ContainerUpdate update = 2; + // Requested eviction of existing containers. + repeated ContainerEviction evict = 3; +} + +message UpdateContainerRequest { + // Pod of container being updated. + PodSandbox pod = 1; + // Container being updated. + Container container = 2; + // Resources to update. + LinuxResources linux_resources = 3; +} + +message UpdateContainerResponse { + // Requested updates to containers. + repeated ContainerUpdate update = 1; + // Requested eviction of containers. + repeated ContainerEviction evict = 2; +} + +message StopContainerRequest { + // Pod of container being stopped. + PodSandbox pod = 1; + // Container being stopped. 
+ Container container = 2; +} + +message StopContainerResponse { + // Requested updates to containers. + repeated ContainerUpdate update = 1; +} + +message StateChangeEvent { + // Event type of notification. + Event event = 1; + // Pod this notification is sent for. If this event is related to a container, + // pod is set to the pod of the container. + PodSandbox pod = 2; + // Container this notification is sent for. If the event is related to a pod, + // container is nil. + Container container = 3; +} + +// Empty response for those *Requests that are semantically events. +message Empty {} + +// Events that plugins can subscribe to in ConfigureResponse. +enum Event { + UNKNOWN = 0; + RUN_POD_SANDBOX = 1; + STOP_POD_SANDBOX = 2; + REMOVE_POD_SANDBOX = 3; + CREATE_CONTAINER = 4; + POST_CREATE_CONTAINER = 5; + START_CONTAINER = 6; + POST_START_CONTAINER = 7; + UPDATE_CONTAINER = 8; + POST_UPDATE_CONTAINER = 9; + STOP_CONTAINER = 10; + REMOVE_CONTAINER = 11; + LAST = 12; +} + +// Pod metadata that is considered relevant for a plugin. +message PodSandbox { + string id = 1; + string name = 2; + string uid = 3; + string namespace = 4; + map labels = 5; + map annotations = 6; + string runtime_handler = 7; + LinuxPodSandbox linux = 8; + uint32 pid = 9; // for NRI v1 emulation + repeated string ips = 10; +} + +// PodSandbox linux-specific metadata +message LinuxPodSandbox { + LinuxResources pod_overhead = 1; + LinuxResources pod_resources = 2; + string cgroup_parent = 3; + string cgroups_path = 4; // for NRI v1 emulation + repeated LinuxNamespace namespaces = 5; // for NRI v1 emulation + LinuxResources resources = 6; // for NRI v1 emulation +} + +// Container metadata that is considered relevant for a plugin. 
+message Container { + string id = 1; + string pod_sandbox_id = 2; + string name = 3; + ContainerState state = 4; + map labels = 5; + map annotations = 6; + repeated string args = 7; + repeated string env = 8; + repeated Mount mounts = 9; + Hooks hooks = 10; + LinuxContainer linux = 11; + uint32 pid = 12; // for NRI v1 emulation + repeated POSIXRlimit rlimits = 13; +} + +// Possible container states. +enum ContainerState { + CONTAINER_UNKNOWN = 0; + CONTAINER_CREATED = 1; + CONTAINER_PAUSED = 2; // is this useful/necessary ? + CONTAINER_RUNNING = 3; + CONTAINER_STOPPED = 4; +} + +// A container mount. +message Mount { + string destination = 1; + string type = 2; + string source = 3; + repeated string options = 4; +} + +// Container OCI hooks. +message Hooks { + repeated Hook prestart = 1; + repeated Hook create_runtime = 2; + repeated Hook create_container = 3; + repeated Hook start_container = 4; + repeated Hook poststart = 5; + repeated Hook poststop = 6; +} + +// One OCI hook. +message Hook { + string path = 1; + repeated string args = 2; + repeated string env = 3; + OptionalInt timeout = 4; +} + +// Container (linux) metadata. +message LinuxContainer { + repeated LinuxNamespace namespaces = 1; + repeated LinuxDevice devices = 2; + LinuxResources resources = 3; + OptionalInt oom_score_adj = 4; + string cgroups_path = 5; +} + +// A linux namespace. +message LinuxNamespace { + string type = 1; + string path = 2; +} + +// A container (linux) device. +message LinuxDevice { + string path = 1; + string type = 2; + int64 major = 3; + int64 minor = 4; + OptionalFileMode file_mode = 5; + OptionalUInt32 uid = 6; + OptionalUInt32 gid = 7; +} + +// A linux device cgroup controller rule. +message LinuxDeviceCgroup { + bool allow = 1; + string type = 2; + OptionalInt64 major = 3; + OptionalInt64 minor = 4; + string access = 5; +} + +// A CDI device reference. +message CDIDevice { + string name = 1; +} + +// Container (linux) resources. 
+message LinuxResources { + LinuxMemory memory = 1; + LinuxCPU cpu = 2; + repeated HugepageLimit hugepage_limits = 3; + OptionalString blockio_class = 4; + OptionalString rdt_class = 5; + map unified = 6; + repeated LinuxDeviceCgroup devices = 7; // for NRI v1 emulation + LinuxPids pids = 8; +} + +// Memory-related parts of (linux) resources. +message LinuxMemory { + OptionalInt64 limit = 1; + OptionalInt64 reservation = 2; + OptionalInt64 swap = 3; + OptionalInt64 kernel = 4; + OptionalInt64 kernel_tcp = 5; + OptionalUInt64 swappiness = 6; + OptionalBool disable_oom_killer = 7; + OptionalBool use_hierarchy = 8; +} + +// CPU-related parts of (linux) resources. +message LinuxCPU { + OptionalUInt64 shares = 1; + OptionalInt64 quota = 2; + OptionalUInt64 period = 3; + OptionalInt64 realtime_runtime = 4; + OptionalUInt64 realtime_period = 5; + string cpus = 6; + string mems = 7; +} + +// Container huge page limit. +message HugepageLimit { + string page_size = 1; + uint64 limit = 2; +} + +// Container rlimits +message POSIXRlimit { + string type = 1; + uint64 hard = 2; + uint64 soft = 3; +} + +// Pids-related parts of (linux) resources. +message LinuxPids { + int64 limit = 1; +} + +// Requested adjustments to a container being created. +message ContainerAdjustment { + map annotations = 2; + repeated Mount mounts = 3; + repeated KeyValue env = 4; + Hooks hooks = 5; + LinuxContainerAdjustment linux = 6; + repeated POSIXRlimit rlimits = 7; + repeated CDIDevice CDI_devices = 8; +} + +// Adjustments to (linux) resources. +message LinuxContainerAdjustment { + repeated LinuxDevice devices = 1; + LinuxResources resources = 2; + string cgroups_path = 3; + OptionalInt oom_score_adj = 4; +} + +// Requested update to an already created container. +message ContainerUpdate { + string container_id = 1; + LinuxContainerUpdate linux = 2; + bool ignore_failure = 3; +} + +// Updates to (linux) resources. 
+message LinuxContainerUpdate { + LinuxResources resources = 1; +} + +// Request to evict (IOW unsolicitedly stop) a container. +message ContainerEviction { + // Container to evict. + string container_id = 1; + // Human-readable reason for eviction. + string reason = 2; +} + +// KeyValue represents an environment variable. +message KeyValue { + string key = 1; + string value = 2; +} + +// An optional string value. +message OptionalString { + string value = 1; +} + +// An optional signed integer value. +message OptionalInt { + int64 value = 1; +} + +// An optional 32-bit signed integer value. +message OptionalInt32 { + int32 value = 1; +} + +// An optional 32-bit unsigned integer value. +message OptionalUInt32 { + uint32 value = 1; +} + +// An optional 64-bit signed integer value. +message OptionalInt64 { + int64 value = 1; +} + +// An optional 64-bit unsigned integer value. +message OptionalUInt64 { + uint64 value = 1; +} + +// An optional boolean value. +message OptionalBool { + bool value = 1; +} + +// An optional value of file permissions. +message OptionalFileMode { + uint32 value = 1; +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api_host.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_host.pb.go new file mode 100644 index 0000000000..d5f5dc3a7d --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_host.pb.go @@ -0,0 +1,667 @@ +//go:build !tinygo.wasm + +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + context "context" + errors "errors" + fmt "fmt" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" + sys "github.com/tetratelabs/wazero/sys" + os "os" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _hostFunctions struct { + HostFunctions +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func (h _hostFunctions) Instantiate(ctx context.Context, r wazero.Runtime) error { + envBuilder := r.NewHostModuleBuilder("env") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Log), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). 
+ Export("log") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +// Log displays a log message + +func (h _hostFunctions) _Log(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(LogRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Log(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +const PluginPluginAPIVersion = 1 + +type PluginPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewPluginPlugin(ctx context.Context, opts ...wazeroConfigOption) (*PluginPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig(), + } + + for _, opt := range opts { + opt(o) + } + + return &PluginPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type plugin interface { + Close(ctx context.Context) error + Plugin +} + +func (p *PluginPlugin) Load(ctx context.Context, pluginPath string, hostFunctions HostFunctions) (plugin, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + h := _hostFunctions{hostFunctions} + + if err := h.Instantiate(ctx, r); err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". 
+ module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. + if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("plugin_api_version") + if apiVersion == nil { + return nil, errors.New("plugin_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid plugin_api_version signature") + } + if results[0] != PluginPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", PluginPluginAPIVersion, results[0]) + } + + configure := module.ExportedFunction("plugin_configure") + if configure == nil { + return nil, errors.New("plugin_configure is not exported") + } + synchronize := module.ExportedFunction("plugin_synchronize") + if synchronize == nil { + return nil, errors.New("plugin_synchronize is not exported") + } + shutdown := module.ExportedFunction("plugin_shutdown") + if shutdown == nil { + return nil, errors.New("plugin_shutdown is not exported") + } + createcontainer := module.ExportedFunction("plugin_create_container") + if createcontainer == nil { + return nil, errors.New("plugin_create_container is not exported") + } + updatecontainer := module.ExportedFunction("plugin_update_container") + if updatecontainer == nil { + return nil, errors.New("plugin_update_container is not exported") + } + stopcontainer := module.ExportedFunction("plugin_stop_container") + if stopcontainer == nil { + return nil, errors.New("plugin_stop_container is not exported") + } + statechange := module.ExportedFunction("plugin_state_change") + if 
statechange == nil { + return nil, errors.New("plugin_state_change is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &pluginPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + configure: configure, + synchronize: synchronize, + shutdown: shutdown, + createcontainer: createcontainer, + updatecontainer: updatecontainer, + stopcontainer: stopcontainer, + statechange: statechange, + }, nil +} + +func (p *pluginPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type pluginPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + configure api.Function + synchronize api.Function + shutdown api.Function + createcontainer api.Function + updatecontainer api.Function + stopcontainer api.Function + statechange api.Function +} + +func (p *pluginPlugin) Configure(ctx context.Context, request *ConfigureRequest) (*ConfigureResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.configure.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ConfigureResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) Synchronize(ctx context.Context, request *SynchronizeRequest) (*SynchronizeResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.synchronize.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(SynchronizeResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) Shutdown(ctx context.Context, request *Empty) (*Empty, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.shutdown.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(Empty) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) CreateContainer(ctx context.Context, request *CreateContainerRequest) (*CreateContainerResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.createcontainer.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(CreateContainerResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) UpdateContainer(ctx context.Context, request *UpdateContainerRequest) (*UpdateContainerResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.updatecontainer.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(UpdateContainerResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) StopContainer(ctx context.Context, request *StopContainerRequest) (*StopContainerResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.stopcontainer.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(StopContainerResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *pluginPlugin) StateChange(ctx context.Context, request *StateChangeEvent) (*Empty, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by TinyGo, but TinyGo is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.statechange.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(Empty) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api_options.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_options.pb.go new file mode 100644 index 0000000000..3639f14437 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_options.pb.go @@ -0,0 +1,62 @@ +//go:build !tinygo.wasm + +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
+ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + context "context" + wazero "github.com/tetratelabs/wazero" + wasi_snapshot_preview1 "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +type wazeroConfigOption func(plugin *WazeroConfig) + +type WazeroNewRuntime func(context.Context) (wazero.Runtime, error) + +type WazeroConfig struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func WazeroRuntime(newRuntime WazeroNewRuntime) wazeroConfigOption { + return func(h *WazeroConfig) { + h.newRuntime = newRuntime + } +} + +func DefaultWazeroRuntime() WazeroNewRuntime { + return func(ctx context.Context) (wazero.Runtime, error) { + r := wazero.NewRuntime(ctx) + if _, err := wasi_snapshot_preview1.Instantiate(ctx, r); err != nil { + return nil, err + } + + return r, nil + } +} + +func WazeroModuleConfig(moduleConfig wazero.ModuleConfig) wazeroConfigOption { + return func(h *WazeroConfig) { + h.moduleConfig = moduleConfig + } +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api_plugin.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_plugin.pb.go new file mode 100644 index 0000000000..30deb7d6bf --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_plugin.pb.go @@ -0,0 +1,240 @@ +//go:build tinygo.wasm + +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +const PluginPluginAPIVersion = 1 + +//export plugin_api_version +func _plugin_api_version() uint64 { + return PluginPluginAPIVersion +} + +var plugin Plugin + +func RegisterPlugin(p Plugin) { + plugin = p +} + +//export plugin_configure +func _plugin_configure(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ConfigureRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.Configure(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_synchronize +func _plugin_synchronize(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(SynchronizeRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.Synchronize(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_shutdown +func _plugin_shutdown(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(Empty) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.Shutdown(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_create_container +func _plugin_create_container(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(CreateContainerRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.CreateContainer(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_update_container +func _plugin_update_container(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(UpdateContainerRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.UpdateContainer(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_stop_container +func _plugin_stop_container(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(StopContainerRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.StopContainer(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//export plugin_state_change +func _plugin_state_change(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(StateChangeEvent) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := plugin.StateChange(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +type hostFunctions struct{} + +func NewHostFunctions() HostFunctions { + return hostFunctions{} +} + +//go:wasmimport env log +func _log(ptr uint32, size uint32) uint64 + +func (h hostFunctions) Log(ctx context.Context, request *LogRequest) (*Empty, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _log(ptr, size) + wasm.FreePtr(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(Empty) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api_service.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_service.pb.go new file mode 100644 index 0000000000..643b14d3c8 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_service.pb.go @@ -0,0 +1,90 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + context "context" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Plugin is the API NRI uses to interact with plugins. It is used to +// - configure a plugin and subscribe it for lifecycle events +// - synchronize the state of a plugin with that of the runtime +// - hook a plugin into the lifecycle events of its interest +// +// During configuration the plugin tells the runtime which lifecycle events +// it wishes to get hooked into. Once configured, the plugin is synchronized +// with the runtime by receiving the list of pods and containers known to +// the runtime. The plugin can request changes to any of the containers in +// response. After initial synchronization the plugin starts receiving the +// events it subscribed for as they occur in the runtime. For container +// creation, update, and stop events, the plugin can request changes, both +// to the container that triggered the event or any other existing container +// in the runtime. +// +// For a subset of the container lifecycle events, NRI defines an additional +// Post-variant of the event. 
These variants are defined for CreateContainer, +// StartContainer, and UpdateContainer. For creation and update, these events +// can be used by plugins to discover the full extent of changes applied to +// the container, including any changes made by other active plugins. +// +// go:plugin type=plugin version=1 +type Plugin interface { + // Configure the plugin and get its event subscription. + Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) + // Synchronize the plugin with the state of the runtime. + Synchronize(context.Context, *SynchronizeRequest) (*SynchronizeResponse, error) + // Shutdown a plugin (let it know the runtime is going down). + Shutdown(context.Context, *Empty) (*Empty, error) + // CreateContainer relays the corresponding request to the plugin. In + // response, the plugin can adjust the container being created, and + // update other containers in the runtime. Container adjustment can + // alter labels, annotations, mounts, devices, environment variables, + // OCI hooks, and assigned container resources. Updates can alter + // assigned container resources. + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + // UpdateContainer relays the corresponding request to the plugin. + // The plugin can alter how the container is updated and request updates + // to additional containers in the runtime. + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + // StopContainer relays the corresponding request to the plugin. The plugin + // can update any of the remaining containers in the runtime in response. + StopContainer(context.Context, *StopContainerRequest) (*StopContainerResponse, error) + // StateChange relays any remaining pod or container lifecycle/state change + // events the plugin has subscribed for. These can be used to trigger any + // plugin-specific processing which needs to occur in connection with any of + // these events. 
+ StateChange(context.Context, *StateChangeEvent) (*Empty, error) +} + +// go:plugin type=host +type HostFunctions interface { + // Log displays a log message + Log(context.Context, *LogRequest) (*Empty, error) +} diff --git a/vendor/github.com/containerd/nri/pkg/api/api_ttrpc.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_ttrpc.pb.go new file mode 100644 index 0000000000..93aa1844b6 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_ttrpc.pb.go @@ -0,0 +1,230 @@ +//go:build !tinygo.wasm + +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. +// source: pkg/api/api.proto +package api + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" +) + +type RuntimeService interface { + RegisterPlugin(context.Context, *RegisterPluginRequest) (*Empty, error) + UpdateContainers(context.Context, *UpdateContainersRequest) (*UpdateContainersResponse, error) +} + +func RegisterRuntimeService(srv *ttrpc.Server, svc RuntimeService) { + srv.RegisterService("nri.pkg.api.v1alpha1.Runtime", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "RegisterPlugin": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req RegisterPluginRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.RegisterPlugin(ctx, &req) + }, + "UpdateContainers": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateContainersRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.UpdateContainers(ctx, &req) + }, + }, + }) +} + +type runtimeClient struct { + client *ttrpc.Client +} + +func NewRuntimeClient(client *ttrpc.Client) RuntimeService { + return &runtimeClient{ + client: client, + } +} + +func (c *runtimeClient) RegisterPlugin(ctx context.Context, req *RegisterPluginRequest) (*Empty, error) { + var resp Empty + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Runtime", "RegisterPlugin", req, &resp); err != nil { + return 
nil, err + } + return &resp, nil +} + +func (c *runtimeClient) UpdateContainers(ctx context.Context, req *UpdateContainersRequest) (*UpdateContainersResponse, error) { + var resp UpdateContainersResponse + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Runtime", "UpdateContainers", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +type PluginService interface { + Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) + Synchronize(context.Context, *SynchronizeRequest) (*SynchronizeResponse, error) + Shutdown(context.Context, *Empty) (*Empty, error) + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + StopContainer(context.Context, *StopContainerRequest) (*StopContainerResponse, error) + StateChange(context.Context, *StateChangeEvent) (*Empty, error) +} + +func RegisterPluginService(srv *ttrpc.Server, svc PluginService) { + srv.RegisterService("nri.pkg.api.v1alpha1.Plugin", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "Configure": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ConfigureRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Configure(ctx, &req) + }, + "Synchronize": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req SynchronizeRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Synchronize(ctx, &req) + }, + "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Shutdown(ctx, &req) + }, + "CreateContainer": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateContainerRequest + if err := unmarshal(&req); err != nil { + return nil, 
err + } + return svc.CreateContainer(ctx, &req) + }, + "UpdateContainer": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateContainerRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.UpdateContainer(ctx, &req) + }, + "StopContainer": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StopContainerRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StopContainer(ctx, &req) + }, + "StateChange": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateChangeEvent + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StateChange(ctx, &req) + }, + }, + }) +} + +type pluginClient struct { + client *ttrpc.Client +} + +func NewPluginClient(client *ttrpc.Client) PluginService { + return &pluginClient{ + client: client, + } +} + +func (c *pluginClient) Configure(ctx context.Context, req *ConfigureRequest) (*ConfigureResponse, error) { + var resp ConfigureResponse + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "Configure", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) Synchronize(ctx context.Context, req *SynchronizeRequest) (*SynchronizeResponse, error) { + var resp SynchronizeResponse + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "Synchronize", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) Shutdown(ctx context.Context, req *Empty) (*Empty, error) { + var resp Empty + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "Shutdown", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) CreateContainer(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) { + var resp CreateContainerResponse + if err := c.client.Call(ctx, 
"nri.pkg.api.v1alpha1.Plugin", "CreateContainer", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) UpdateContainer(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) { + var resp UpdateContainerResponse + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "UpdateContainer", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) StopContainer(ctx context.Context, req *StopContainerRequest) (*StopContainerResponse, error) { + var resp StopContainerResponse + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "StopContainer", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *pluginClient) StateChange(ctx context.Context, req *StateChangeEvent) (*Empty, error) { + var resp Empty + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.Plugin", "StateChange", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +type HostFunctionsService interface { + Log(context.Context, *LogRequest) (*Empty, error) +} + +func RegisterHostFunctionsService(srv *ttrpc.Server, svc HostFunctionsService) { + srv.RegisterService("nri.pkg.api.v1alpha1.HostFunctions", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "Log": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req LogRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Log(ctx, &req) + }, + }, + }) +} + +type hostFunctionsClient struct { + client *ttrpc.Client +} + +func NewHostFunctionsClient(client *ttrpc.Client) HostFunctionsService { + return &hostFunctionsClient{ + client: client, + } +} + +func (c *hostFunctionsClient) Log(ctx context.Context, req *LogRequest) (*Empty, error) { + var resp Empty + if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.HostFunctions", "Log", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git 
a/vendor/github.com/containerd/nri/pkg/api/api_vtproto.pb.go b/vendor/github.com/containerd/nri/pkg/api/api_vtproto.pb.go new file mode 100644 index 0000000000..d40f21f6ad --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/api_vtproto.pb.go @@ -0,0 +1,12053 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v3.20.1 +// source: pkg/api/api.proto + +package api + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RegisterPluginRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegisterPluginRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RegisterPluginRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PluginIdx) > 0 { + i -= len(m.PluginIdx) + copy(dAtA[i:], m.PluginIdx) + i = encodeVarint(dAtA, i, uint64(len(m.PluginIdx))) + i-- + dAtA[i] = 0x12 + } + if len(m.PluginName) > 0 { + i -= len(m.PluginName) + copy(dAtA[i:], m.PluginName) + i = encodeVarint(dAtA, i, uint64(len(m.PluginName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainersRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainersRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateContainersRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Evict) > 0 { + for iNdEx := len(m.Evict) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Evict[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Update) > 0 { + for iNdEx := len(m.Update) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Update[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainersResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainersResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateContainersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Failed) > 0 { + for iNdEx := len(m.Failed) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Failed[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if m.Level != 0 { + i = encodeVarint(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarint(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigureRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigureRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ConfigureRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RequestTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.RequestTimeout)) + i-- + dAtA[i] = 0x28 + } + if m.RegistrationTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.RegistrationTimeout)) + i-- + dAtA[i] = 0x20 + } + if len(m.RuntimeVersion) > 0 { + i -= len(m.RuntimeVersion) + copy(dAtA[i:], m.RuntimeVersion) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeVersion))) + i-- + dAtA[i] = 0x1a + } + if len(m.RuntimeName) > 0 { + i -= len(m.RuntimeName) + copy(dAtA[i:], m.RuntimeName) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarint(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigureResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *ConfigureResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ConfigureResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Events != 0 { + i = encodeVarint(dAtA, i, uint64(m.Events)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *SynchronizeRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SynchronizeRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SynchronizeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.More { + i-- + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Containers[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Pods) > 0 { + for iNdEx := len(m.Pods) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Pods[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SynchronizeResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := 
m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SynchronizeResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SynchronizeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.More { + i-- + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Update) > 0 { + for iNdEx := len(m.Update) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Update[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CreateContainerRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateContainerRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Container != nil { + size, err := m.Container.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Pod != nil { + size, err := m.Pod.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateContainerResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateContainerResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Evict) > 0 { + for iNdEx := len(m.Evict) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Evict[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Update) > 0 { + for iNdEx := len(m.Update) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Update[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Adjust != nil { + size, err := m.Adjust.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainerRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m 
*UpdateContainerRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LinuxResources != nil { + size, err := m.LinuxResources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Container != nil { + size, err := m.Container.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Pod != nil { + size, err := m.Pod.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainerResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateContainerResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Evict) > 0 { + for iNdEx := len(m.Evict) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Evict[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Update) > 0 { + for iNdEx := len(m.Update) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Update[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StopContainerRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopContainerRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopContainerRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Container != nil { + size, err := m.Container.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Pod != nil { + size, err := m.Pod.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopContainerResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopContainerResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopContainerResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Update) > 0 { + for iNdEx := 
len(m.Update) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Update[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StateChangeEvent) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateChangeEvent) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StateChangeEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Container != nil { + size, err := m.Container.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Pod != nil { + size, err := m.Pod.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Event != 0 { + i = encodeVarint(dAtA, i, uint64(m.Event)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Empty) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PodSandbox) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandbox) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PodSandbox) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if m.Pid != 0 { + i = encodeVarint(dAtA, i, uint64(m.Pid)) + i-- + dAtA[i] = 0x48 + } + if m.Linux != nil { + size, err := m.Linux.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x3a + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) 
+ i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarint(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + } + if len(m.Uid) > 0 { + i -= len(m.Uid) + copy(dAtA[i:], m.Uid) + i = encodeVarint(dAtA, i, uint64(len(m.Uid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxPodSandbox) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxPodSandbox) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxPodSandbox) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Resources != nil { + size, err := m.Resources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Namespaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.CgroupsPath) > 0 { + i -= 
len(m.CgroupsPath) + copy(dAtA[i:], m.CgroupsPath) + i = encodeVarint(dAtA, i, uint64(len(m.CgroupsPath))) + i-- + dAtA[i] = 0x22 + } + if len(m.CgroupParent) > 0 { + i -= len(m.CgroupParent) + copy(dAtA[i:], m.CgroupParent) + i = encodeVarint(dAtA, i, uint64(len(m.CgroupParent))) + i-- + dAtA[i] = 0x1a + } + if m.PodResources != nil { + size, err := m.PodResources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PodOverhead != nil { + size, err := m.PodOverhead.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Container) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rlimits) > 0 { + for iNdEx := len(m.Rlimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rlimits[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.Pid != 0 { + i = encodeVarint(dAtA, i, uint64(m.Pid)) + i-- + dAtA[i] = 0x60 + } + if m.Linux != nil { + size, err := m.Linux.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.Hooks != nil { + size, err 
:= m.Hooks.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Mounts[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarint(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + 
dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mount) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Mount) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Options[iNdEx]) + copy(dAtA[i:], m.Options[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Options[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Source) > 0 { + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarint(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0x1a + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + } + if len(m.Destination) > 0 { + i -= len(m.Destination) + copy(dAtA[i:], m.Destination) + i = encodeVarint(dAtA, i, uint64(len(m.Destination))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Hooks) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Hooks) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Hooks) MarshalToSizedBufferVT(dAtA []byte) 
(int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Poststop) > 0 { + for iNdEx := len(m.Poststop) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Poststop[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Poststart) > 0 { + for iNdEx := len(m.Poststart) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Poststart[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.StartContainer) > 0 { + for iNdEx := len(m.StartContainer) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StartContainer[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CreateContainer) > 0 { + for iNdEx := len(m.CreateContainer) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CreateContainer[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.CreateRuntime) > 0 { + for iNdEx := len(m.CreateRuntime) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CreateRuntime[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Prestart) > 0 { + for iNdEx := len(m.Prestart) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Prestart[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Hook) MarshalVT() (dAtA []byte, err error) { + if m == nil { + 
return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Hook) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Hook) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := m.Timeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainer) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainer) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxContainer) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) 
+ copy(dAtA[i:], m.unknownFields) + } + if len(m.CgroupsPath) > 0 { + i -= len(m.CgroupsPath) + copy(dAtA[i:], m.CgroupsPath) + i = encodeVarint(dAtA, i, uint64(len(m.CgroupsPath))) + i-- + dAtA[i] = 0x2a + } + if m.OomScoreAdj != nil { + size, err := m.OomScoreAdj.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Resources != nil { + size, err := m.Resources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Devices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Namespaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LinuxNamespace) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxNamespace) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxNamespace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + 
dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxDevice) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxDevice) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxDevice) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Gid != nil { + size, err := m.Gid.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Uid != nil { + size, err := m.Uid.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.FileMode != nil { + size, err := m.FileMode.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Minor != 0 { + i = encodeVarint(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 + } + if m.Major != 0 { + i = encodeVarint(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxDeviceCgroup) MarshalVT() (dAtA []byte, err 
error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxDeviceCgroup) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxDeviceCgroup) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Access) > 0 { + i -= len(m.Access) + copy(dAtA[i:], m.Access) + i = encodeVarint(dAtA, i, uint64(len(m.Access))) + i-- + dAtA[i] = 0x2a + } + if m.Minor != nil { + size, err := m.Minor.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Major != nil { + size, err := m.Major.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + } + if m.Allow { + i-- + if m.Allow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CDIDevice) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CDIDevice) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CDIDevice) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxResources) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxResources) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxResources) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Pids != nil { + size, err := m.Pids.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Devices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Unified) > 0 { + for k := range m.Unified { + v := m.Unified[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.RdtClass != nil { + size, err := m.RdtClass.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.BlockioClass != nil { + size, 
err := m.BlockioClass.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.HugepageLimits) > 0 { + for iNdEx := len(m.HugepageLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HugepageLimits[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Cpu != nil { + size, err := m.Cpu.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Memory != nil { + size, err := m.Memory.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxMemory) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxMemory) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxMemory) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseHierarchy != nil { + size, err := m.UseHierarchy.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.DisableOomKiller != nil { + size, err := m.DisableOomKiller.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Swappiness != nil 
{ + size, err := m.Swappiness.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.KernelTcp != nil { + size, err := m.KernelTcp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Kernel != nil { + size, err := m.Kernel.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Swap != nil { + size, err := m.Swap.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Reservation != nil { + size, err := m.Reservation.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Limit != nil { + size, err := m.Limit.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxCPU) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxCPU) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxCPU) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mems) > 0 { + i -= len(m.Mems) + copy(dAtA[i:], m.Mems) + i = encodeVarint(dAtA, i, uint64(len(m.Mems))) + i-- + dAtA[i] = 
0x3a + } + if len(m.Cpus) > 0 { + i -= len(m.Cpus) + copy(dAtA[i:], m.Cpus) + i = encodeVarint(dAtA, i, uint64(len(m.Cpus))) + i-- + dAtA[i] = 0x32 + } + if m.RealtimePeriod != nil { + size, err := m.RealtimePeriod.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.RealtimeRuntime != nil { + size, err := m.RealtimeRuntime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Period != nil { + size, err := m.Period.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + size, err := m.Quota.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Shares != nil { + size, err := m.Shares.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HugepageLimit) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugepageLimit) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HugepageLimit) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if len(m.PageSize) > 0 { + i -= 
len(m.PageSize) + copy(dAtA[i:], m.PageSize) + i = encodeVarint(dAtA, i, uint64(len(m.PageSize))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *POSIXRlimit) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *POSIXRlimit) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *POSIXRlimit) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Soft != 0 { + i = encodeVarint(dAtA, i, uint64(m.Soft)) + i-- + dAtA[i] = 0x18 + } + if m.Hard != 0 { + i = encodeVarint(dAtA, i, uint64(m.Hard)) + i-- + dAtA[i] = 0x10 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxPids) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxPids) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxPids) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ContainerAdjustment) MarshalVT() (dAtA []byte, err 
error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerAdjustment) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ContainerAdjustment) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CDIDevices) > 0 { + for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CDIDevices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Rlimits) > 0 { + for iNdEx := len(m.Rlimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rlimits[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.Linux != nil { + size, err := m.Linux.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.Hooks != nil { + size, err := m.Hooks.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Env[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Mounts[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err 
+ } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainerAdjustment) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerAdjustment) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxContainerAdjustment) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OomScoreAdj != nil { + size, err := m.OomScoreAdj.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.CgroupsPath) > 0 { + i -= len(m.CgroupsPath) + copy(dAtA[i:], m.CgroupsPath) + i = encodeVarint(dAtA, i, uint64(len(m.CgroupsPath))) + i-- + dAtA[i] = 0x1a + } + if m.Resources != nil { + size, err := m.Resources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Devices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, 
i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerUpdate) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUpdate) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ContainerUpdate) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IgnoreFailure { + i-- + if m.IgnoreFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Linux != nil { + size, err := m.Linux.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainerUpdate) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerUpdate) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LinuxContainerUpdate) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Resources != nil { + size, err := 
m.Resources.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerEviction) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerEviction) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ContainerEviction) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarint(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + 
dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OptionalString) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalString) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalString) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OptionalInt) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalInt32) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalInt32) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalInt32) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalUInt32) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalUInt32) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalUInt32) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalInt64) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalInt64) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalInt64) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil 
{ + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalUInt64) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalUInt64) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalUInt64) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalBool) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalBool) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OptionalBool) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OptionalFileMode) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + 
return nil, err
	}
	// The buffer is filled back-to-front, so the encoded message is dAtA[:n].
	return dAtA[:n], nil
}

// MarshalToVT encodes m into the front of dAtA and returns the bytes written.
func (m *OptionalFileMode) MarshalToVT(dAtA []byte) (int, error) {
	size := m.SizeVT()
	return m.MarshalToSizedBufferVT(dAtA[:size])
}

// MarshalToSizedBufferVT encodes m into the tail of dAtA, writing fields in
// reverse order (back-to-front), and returns the number of bytes written.
func (m *OptionalFileMode) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
	if m == nil {
		return 0, nil
	}
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.unknownFields != nil {
		// Preserve unknown fields captured during a previous unmarshal.
		i -= len(m.unknownFields)
		copy(dAtA[i:], m.unknownFields)
	}
	if m.Value != 0 {
		// Field 1, varint wire type: value bytes first, then the tag byte 0x8.
		i = encodeVarint(dAtA, i, uint64(m.Value))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}

// encodeVarint writes v as a protobuf base-128 varint so that it ends just
// before offset (buffers here are filled back-to-front) and returns the
// offset of the varint's first byte.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v) // final byte, continuation bit clear
	return base
}

// SizeVT returns the serialized size of m in bytes. A length-delimited field
// costs 1 tag byte + payload length + the varint encoding of that length.
func (m *RegisterPluginRequest) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.PluginName)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	l = len(m.PluginIdx)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *UpdateContainersRequest) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Update) > 0 {
		for _, e := range m.Update {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if len(m.Evict) > 0 {
		for _, e := range m.Evict {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *UpdateContainersResponse) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Failed) > 0 {
		for _, e := range m.Failed {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *LogRequest) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Msg)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	if m.Level != 0 {
		n += 1 + sov(uint64(m.Level))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *ConfigureRequest) SizeVT()
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Config) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.RegistrationTimeout != 0 { + n += 1 + sov(uint64(m.RegistrationTimeout)) + } + if m.RequestTimeout != 0 { + n += 1 + sov(uint64(m.RequestTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigureResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Events != 0 { + n += 1 + sov(uint64(m.Events)) + } + n += len(m.unknownFields) + return n +} + +func (m *SynchronizeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pods) > 0 { + for _, e := range m.Pods { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.More { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SynchronizeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Update) > 0 { + for _, e := range m.Update { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.More { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CreateContainerRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pod != nil { + l = m.Pod.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Container != nil { + l = m.Container.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateContainerResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Adjust != nil { + l = m.Adjust.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Update) > 0 { + for _, e := range m.Update { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Evict) > 0 { + for _, e 
:= range m.Evict { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateContainerRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pod != nil { + l = m.Pod.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Container != nil { + l = m.Container.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.LinuxResources != nil { + l = m.LinuxResources.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateContainerResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Update) > 0 { + for _, e := range m.Update { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Evict) > 0 { + for _, e := range m.Evict { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StopContainerRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pod != nil { + l = m.Pod.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Container != nil { + l = m.Container.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopContainerResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Update) > 0 { + for _, e := range m.Update { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StateChangeEvent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != 0 { + n += 1 + sov(uint64(m.Event)) + } + if m.Pod != nil { + l = m.Pod.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Container != nil { + l = m.Container.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Empty) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PodSandbox) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.RuntimeHandler) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sov(uint64(m.Pid)) + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxPodSandbox) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodOverhead != nil { + l = m.PodOverhead.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.PodResources != nil { + l = m.PodResources.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.CgroupParent) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CgroupsPath) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, e := range m.Namespaces { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Resources != nil { + l = m.Resources.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Container) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + 
sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Hooks != nil { + l = m.Hooks.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sov(uint64(m.Pid)) + } + if len(m.Rlimits) > 0 { + for _, e := range m.Rlimits { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Mount) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Destination) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Hooks) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Prestart) > 0 { + for _, e := range m.Prestart { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.CreateRuntime) > 0 { + for _, e := range m.CreateRuntime { + l = 
e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.CreateContainer) > 0 { + for _, e := range m.CreateContainer { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.StartContainer) > 0 { + for _, e := range m.StartContainer { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Poststart) > 0 { + for _, e := range m.Poststart { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Poststop) > 0 { + for _, e := range m.Poststop { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Hook) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Timeout != nil { + l = m.Timeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxContainer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Namespaces) > 0 { + for _, e := range m.Namespaces { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Resources != nil { + l = m.Resources.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.OomScoreAdj != nil { + l = m.OomScoreAdj.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.CgroupsPath) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxNamespace) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxDevice) SizeVT() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Major != 0 { + n += 1 + sov(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sov(uint64(m.Minor)) + } + if m.FileMode != nil { + l = m.FileMode.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Uid != nil { + l = m.Uid.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Gid != nil { + l = m.Gid.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxDeviceCgroup) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allow { + n += 2 + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Major != nil { + l = m.Major.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Minor != nil { + l = m.Minor.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Access) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CDIDevice) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Memory != nil { + l = m.Memory.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Cpu != nil { + l = m.Cpu.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.HugepageLimits) > 0 { + for _, e := range m.HugepageLimits { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.BlockioClass != nil { + l = m.BlockioClass.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.RdtClass != nil { + l = m.RdtClass.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Unified) > 0 { + for k, v := range m.Unified { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) 
+ } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxMemory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != nil { + l = m.Limit.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Reservation != nil { + l = m.Reservation.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.KernelTcp != nil { + l = m.KernelTcp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Swappiness != nil { + l = m.Swappiness.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DisableOomKiller != nil { + l = m.DisableOomKiller.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.UseHierarchy != nil { + l = m.UseHierarchy.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxCPU) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shares != nil { + l = m.Shares.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Period != nil { + l = m.Period.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.RealtimeRuntime != nil { + l = m.RealtimeRuntime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.RealtimePeriod != nil { + l = m.RealtimePeriod.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cpus) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mems) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HugepageLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PageSize) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + 
n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *POSIXRlimit) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Type)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	if m.Hard != 0 {
		n += 1 + sov(uint64(m.Hard))
	}
	if m.Soft != 0 {
		n += 1 + sov(uint64(m.Soft))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *LinuxPids) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Limit != 0 {
		n += 1 + sov(uint64(m.Limit))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *ContainerAdjustment) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Annotations) > 0 {
		for k, v := range m.Annotations {
			_ = k
			_ = v
			// A map entry is a nested message holding key (field 1) and value
			// (field 2), each tag + length varint + payload; the entry itself
			// is length-delimited, hence the outer tag + size varint.
			mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
			n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
		}
	}
	if len(m.Mounts) > 0 {
		for _, e := range m.Mounts {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if len(m.Env) > 0 {
		for _, e := range m.Env {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if m.Hooks != nil {
		l = m.Hooks.SizeVT()
		n += 1 + l + sov(uint64(l))
	}
	if m.Linux != nil {
		l = m.Linux.SizeVT()
		n += 1 + l + sov(uint64(l))
	}
	if len(m.Rlimits) > 0 {
		for _, e := range m.Rlimits {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if len(m.CDIDevices) > 0 {
		for _, e := range m.CDIDevices {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *LinuxContainerAdjustment) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Devices) > 0 {
		for _, e := range m.Devices {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if m.Resources != nil {
		l = m.Resources.SizeVT()
		n += 1 + l + sov(uint64(l))
	}
	l = len(m.CgroupsPath)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	if m.OomScoreAdj != nil {
		l = m.OomScoreAdj.SizeVT()
		n += 1 + l + sov(uint64(l))
	}
	n +=
len(m.unknownFields) + return n +} + +func (m *ContainerUpdate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.IgnoreFailure { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *LinuxContainerUpdate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Resources != nil { + l = m.Resources.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ContainerEviction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OptionalString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OptionalInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + n += len(m.unknownFields) + return n +} + +func (m *OptionalInt32) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + n += len(m.unknownFields) + return n +} + +func (m *OptionalUInt32) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + n += len(m.unknownFields) + return n +} + +func (m *OptionalInt64) SizeVT() (n int) { + if m == nil { + 
return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 1 + sov(uint64(m.Value))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *OptionalUInt64) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 1 + sov(uint64(m.Value))
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes
// (1 tag byte + 1 bool byte when the value is set).
func (m *OptionalBool) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value {
		n += 2
	}
	n += len(m.unknownFields)
	return n
}

// SizeVT returns the serialized size of m in bytes.
func (m *OptionalFileMode) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 1 + sov(uint64(m.Value))
	}
	n += len(m.unknownFields)
	return n
}

// sov returns the number of bytes needed to varint-encode x
// (the x|1 makes the result at least 1, covering x == 0).
func sov(x uint64) (n int) {
	return (bits.Len64(x|1) + 6) / 7
}

// soz returns the varint-encoded size of x after zigzag encoding
// (used for sint32/sint64 fields).
func soz(x uint64) (n int) {
	return sov(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// UnmarshalVT decodes the protobuf wire format in dAtA into m, appending any
// unrecognized fields to m.unknownFields.
func (m *RegisterPluginRequest) UnmarshalVT(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint: (field number << 3) | wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflow
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RegisterPluginRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RegisterPluginRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PluginName", wireType)
			}
			// Length-delimited: read the byte-length varint, then the payload.
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflow
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return
ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PluginName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginIdx", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PluginIdx = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainersRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Update = append(m.Update, &ContainerUpdate{}) + if err := m.Update[len(m.Update)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evict", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evict = append(m.Evict, &ContainerEviction{}) + if err := m.Evict[len(m.Evict)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainersResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failed = append(m.Failed, &ContainerUpdate{}) + if err := 
m.Failed[len(m.Failed)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= LogRequest_Level(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigureRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigureRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigureRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field RuntimeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationTimeout", wireType) + } + m.RegistrationTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegistrationTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTimeout", wireType) + } + m.RequestTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.RequestTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigureResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigureResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigureResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + m.Events = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Events |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SynchronizeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SynchronizeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SynchronizeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pods = append(m.Pods, &PodSandbox{}) + if err := m.Pods[len(m.Pods)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &Container{}) + if err := m.Containers[len(m.Containers)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SynchronizeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SynchronizeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SynchronizeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Update = append(m.Update, &ContainerUpdate{}) + if err := m.Update[len(m.Update)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pod == nil { + m.Pod = &PodSandbox{} + } + if err := m.Pod.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &Container{} + } + if err := m.Container.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Adjust", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Adjust == nil { + m.Adjust = &ContainerAdjustment{} + } + if err := m.Adjust.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Update = append(m.Update, &ContainerUpdate{}) + if err := m.Update[len(m.Update)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evict", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evict = append(m.Evict, &ContainerEviction{}) + if err := m.Evict[len(m.Evict)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pod == nil { + m.Pod = &PodSandbox{} + } + if err := m.Pod.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &Container{} + } + if err := m.Container.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinuxResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LinuxResources == nil { + m.LinuxResources = &LinuxResources{} + } + if err := m.LinuxResources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Update = append(m.Update, &ContainerUpdate{}) + if err := m.Update[len(m.Update)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evict", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evict = append(m.Evict, &ContainerEviction{}) + if err := m.Evict[len(m.Evict)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopContainerRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pod == nil { + m.Pod = &PodSandbox{} + } + if err := m.Pod.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &Container{} + } + if err := m.Container.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopContainerResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Update = append(m.Update, &ContainerUpdate{}) + if err := m.Update[len(m.Update)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StateChangeEvent) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StateChangeEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StateChangeEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + m.Event = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Event |= Event(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pod == nil { + m.Pod = &PodSandbox{} + } + if err := m.Pod.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &Container{} + } + if err := m.Container.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandbox) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandbox: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandbox: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = 
make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxPodSandbox{} + } + if err := m.Linux.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxPodSandbox) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxPodSandbox: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxPodSandbox: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodOverhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodOverhead == nil { + m.PodOverhead = &LinuxResources{} + } + if err := m.PodOverhead.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodResources == nil { + m.PodResources = &LinuxResources{} + } + if err := m.PodResources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupParent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupParent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupsPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupsPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, &LinuxNamespace{}) + if err := m.Namespaces[len(m.Namespaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxResources{} + } + if err := m.Resources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ContainerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } 
+ if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hooks == nil { + m.Hooks = &Hooks{} + } + if err := m.Hooks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainer{} + } + if err := m.Linux.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rlimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rlimits = append(m.Rlimits, &POSIXRlimit{}) + if err := m.Rlimits[len(m.Rlimits)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Destination = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Hooks) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Hooks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Hooks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prestart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prestart = append(m.Prestart, &Hook{}) + if err := m.Prestart[len(m.Prestart)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRuntime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.CreateRuntime = append(m.CreateRuntime, &Hook{}) + if err := m.CreateRuntime[len(m.CreateRuntime)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CreateContainer = append(m.CreateContainer, &Hook{}) + if err := m.CreateContainer[len(m.CreateContainer)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartContainer = append(m.StartContainer, &Hook{}) + if err := m.StartContainer[len(m.StartContainer)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Poststart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Poststart = append(m.Poststart, &Hook{}) + if err := m.Poststart[len(m.Poststart)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Poststop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Poststop = append(m.Poststop, &Hook{}) + if err := m.Poststop[len(m.Poststop)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Hook) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Hook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Hook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &OptionalInt{} + } + if err := m.Timeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxContainer) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, &LinuxNamespace{}) + if err := m.Namespaces[len(m.Namespaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &LinuxDevice{}) + if err := m.Devices[len(m.Devices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxResources{} + } + if err := m.Resources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OomScoreAdj", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OomScoreAdj == nil { + m.OomScoreAdj = &OptionalInt{} + } + if err := m.OomScoreAdj.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupsPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupsPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxNamespace) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxDevice) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileMode", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FileMode == nil { + m.FileMode = &OptionalFileMode{} + } + if err := m.FileMode.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Uid == nil { + m.Uid = &OptionalUInt32{} + } + if err := m.Uid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gid == nil { + m.Gid = &OptionalUInt32{} + } + if err := m.Gid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxDeviceCgroup) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxDeviceCgroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxDeviceCgroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allow = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Major == nil { + m.Major = &OptionalInt64{} + } + if err := m.Major.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Minor == nil { + m.Minor = &OptionalInt64{} + } + if err := m.Minor.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Access", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Access = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CDIDevice) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CDIDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CDIDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxResources) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &LinuxMemory{} + } + if err := m.Memory.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &LinuxCPU{} + } + if err := m.Cpu.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HugepageLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HugepageLimits = append(m.HugepageLimits, &HugepageLimit{}) + if err := m.HugepageLimits[len(m.HugepageLimits)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockioClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockioClass == nil { + m.BlockioClass = &OptionalString{} + } + if err := m.BlockioClass.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RdtClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RdtClass == nil { + m.RdtClass = &OptionalString{} + } + if err := m.RdtClass.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unified", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Unified == nil { + m.Unified = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + 
iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Unified[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &LinuxDeviceCgroup{}) + if err := m.Devices[len(m.Devices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &LinuxPids{} + } + if err := m.Pids.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxMemory) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxMemory: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxMemory: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limit == nil { + m.Limit = &OptionalInt64{} + } + if err := m.Limit.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservation == nil { + m.Reservation = &OptionalInt64{} + } + if err := m.Reservation.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &OptionalInt64{} + } + if err := m.Swap.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &OptionalInt64{} + } + if err := m.Kernel.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTcp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTcp == nil { + m.KernelTcp = &OptionalInt64{} + } + if err := m.KernelTcp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swappiness", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swappiness == nil { + m.Swappiness = &OptionalUInt64{} + } + if err := m.Swappiness.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableOomKiller", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DisableOomKiller == nil { + m.DisableOomKiller = &OptionalBool{} + } + if err := m.DisableOomKiller.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UseHierarchy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UseHierarchy == nil { + m.UseHierarchy = &OptionalBool{} + } + if err := m.UseHierarchy.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxCPU) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxCPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxCPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shares == nil { + m.Shares = &OptionalUInt64{} + } + if err := m.Shares.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &OptionalInt64{} + } + if err := m.Quota.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Period == nil { + m.Period = &OptionalUInt64{} + } + if err := m.Period.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RealtimeRuntime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RealtimeRuntime == nil { + m.RealtimeRuntime = &OptionalInt64{} + } + if err := m.RealtimeRuntime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RealtimePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RealtimePeriod == nil { + m.RealtimePeriod = &OptionalUInt64{} + } + if err := m.RealtimePeriod.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cpus = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mems", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mems = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugepageLimit) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugepageLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugepageLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PageSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PageSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *POSIXRlimit) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: POSIXRlimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: POSIXRlimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + m.Hard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hard |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Soft", wireType) + } + m.Soft = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Soft |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxPids) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxPids: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxPids: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerAdjustment) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerAdjustment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerAdjustment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, &KeyValue{}) + if err := m.Env[len(m.Env)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hooks == nil { + m.Hooks = &Hooks{} + } + if err := m.Hooks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerAdjustment{} + } + if err := m.Linux.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rlimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rlimits = append(m.Rlimits, &POSIXRlimit{}) + if err := m.Rlimits[len(m.Rlimits)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CDIDevices = append(m.CDIDevices, &CDIDevice{}) + if err := m.CDIDevices[len(m.CDIDevices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxContainerAdjustment) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerAdjustment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerAdjustment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &LinuxDevice{}) + if err := m.Devices[len(m.Devices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxResources{} + } + if err := m.Resources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupsPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupsPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OomScoreAdj", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OomScoreAdj == nil { + m.OomScoreAdj = &OptionalInt{} + } + if err := m.OomScoreAdj.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerUpdate) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerUpdate{} + } + if err := m.Linux.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreFailure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreFailure = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxContainerUpdate) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxResources{} + } + if err := m.Resources.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerEviction) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerEviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerEviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalString) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalInt) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalInt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalInt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalInt32) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalInt32: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalInt32: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalUInt32) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalUInt32: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalUInt32: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalInt64) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalInt64: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalInt64: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalUInt64) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalUInt64: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalUInt64: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalBool) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalFileMode) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalFileMode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalFileMode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/containerd/nri/pkg/api/device.go b/vendor/github.com/containerd/nri/pkg/api/device.go new file mode 100644 index 0000000000..c7307b169b --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/device.go @@ -0,0 +1,89 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// FromOCILinuxDevices returns a device slice from an OCI runtime Spec. +func FromOCILinuxDevices(o []rspec.LinuxDevice) []*LinuxDevice { + var devices []*LinuxDevice + for _, d := range o { + devices = append(devices, &LinuxDevice{ + Path: d.Path, + Type: d.Type, + Major: d.Major, + Minor: d.Minor, + FileMode: FileMode(d.FileMode), + Uid: UInt32(d.UID), + Gid: UInt32(d.GID), + }) + } + return devices +} + +// ToOCI returns the linux devices for an OCI runtime Spec. +func (d *LinuxDevice) ToOCI() rspec.LinuxDevice { + if d == nil { + return rspec.LinuxDevice{} + } + + return rspec.LinuxDevice{ + Path: d.Path, + Type: d.Type, + Major: d.Major, + Minor: d.Minor, + FileMode: d.FileMode.Get(), + UID: d.Uid.Get(), + GID: d.Gid.Get(), + } +} + +// AccessString returns an OCI access string for the device. +func (d *LinuxDevice) AccessString() string { + r, w, m := "r", "w", "" + + if mode := d.FileMode.Get(); mode != nil { + perm := mode.Perm() + if (perm & 0444) != 0 { + r = "r" + } + if (perm & 0222) != 0 { + w = "w" + } + } + if d.Type == "b" { + m = "m" + } + + return r + w + m +} + +// Cmp returns true if the devices are equal. +func (d *LinuxDevice) Cmp(v *LinuxDevice) bool { + if v == nil { + return false + } + return d.Major != v.Major || d.Minor != v.Minor +} + +// IsMarkedForRemoval checks if a LinuxDevice is marked for removal. 
+func (d *LinuxDevice) IsMarkedForRemoval() (string, bool) { + key, marked := IsMarkedForRemoval(d.Path) + return key, marked +} diff --git a/vendor/github.com/containerd/nri/pkg/api/doc.go b/vendor/github.com/containerd/nri/pkg/api/doc.go new file mode 100644 index 0000000000..2413d025ed --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api diff --git a/vendor/github.com/containerd/nri/pkg/api/env.go b/vendor/github.com/containerd/nri/pkg/api/env.go new file mode 100644 index 0000000000..0f6e711de2 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/env.go @@ -0,0 +1,60 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "strings" +) + +// ToOCI returns an OCI Env entry for the KeyValue. 
+func (e *KeyValue) ToOCI() string { + return e.Key + "=" + e.Value +} + +// FromOCIEnv returns KeyValues from an OCI runtime Spec environment. +func FromOCIEnv(in []string) []*KeyValue { + if in == nil { + return nil + } + out := []*KeyValue{} + for _, keyval := range in { + var key, val string + split := strings.SplitN(keyval, "=", 2) + switch len(split) { + case 0: + continue + case 1: + key = split[0] + case 2: + key = split[0] + val = split[1] + default: + val = strings.Join(split[1:], "=") + } + out = append(out, &KeyValue{ + Key: key, + Value: val, + }) + } + return out +} + +// IsMarkedForRemoval checks if an environment variable is marked for removal. +func (e *KeyValue) IsMarkedForRemoval() (string, bool) { + key, marked := IsMarkedForRemoval(e.Key) + return key, marked +} diff --git a/vendor/github.com/containerd/nri/pkg/api/event.go b/vendor/github.com/containerd/nri/pkg/api/event.go new file mode 100644 index 0000000000..260460adaf --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/event.go @@ -0,0 +1,172 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "fmt" + "strings" +) + +const ( + // ValidEvents is the event mask of all valid events. + ValidEvents = EventMask((1 << (Event_LAST - 1)) - 1) +) + +// nolint +type ( + // Define *Request/*Response type aliases for *Event/Empty pairs. 
+ + StateChangeResponse = Empty + RunPodSandboxRequest = StateChangeEvent + RunPodSandboxResponse = Empty + StopPodSandboxRequest = StateChangeEvent + StopPodSandboxResponse = Empty + RemovePodSandboxRequest = StateChangeEvent + RemovePodSandboxResponse = Empty + StartContainerRequest = StateChangeEvent + StartContainerResponse = Empty + RemoveContainerRequest = StateChangeEvent + RemoveContainerResponse = Empty + PostCreateContainerRequest = StateChangeEvent + PostCreateContainerResponse = Empty + PostStartContainerRequest = StateChangeEvent + PostStartContainerResponse = Empty + PostUpdateContainerRequest = StateChangeEvent + PostUpdateContainerResponse = Empty + + ShutdownRequest = Empty + ShutdownResponse = Empty +) + +// EventMask corresponds to a set of enumerated Events. +type EventMask int32 + +// ParseEventMask parses a string representation into an EventMask. +func ParseEventMask(events ...string) (EventMask, error) { + var mask EventMask + + bits := map[string]Event{ + "runpodsandbox": Event_RUN_POD_SANDBOX, + "stoppodsandbox": Event_STOP_POD_SANDBOX, + "removepodsandbox": Event_REMOVE_POD_SANDBOX, + "createcontainer": Event_CREATE_CONTAINER, + "postcreatecontainer": Event_POST_CREATE_CONTAINER, + "startcontainer": Event_START_CONTAINER, + "poststartcontainer": Event_POST_START_CONTAINER, + "updatecontainer": Event_UPDATE_CONTAINER, + "postupdatecontainer": Event_POST_UPDATE_CONTAINER, + "stopcontainer": Event_STOP_CONTAINER, + "removecontainer": Event_REMOVE_CONTAINER, + } + + for _, event := range events { + lcEvents := strings.ToLower(event) + for _, name := range strings.Split(lcEvents, ",") { + switch name { + case "all": + mask |= ValidEvents + continue + case "pod", "podsandbox": + for name, bit := range bits { + if strings.Contains(name, "pod") { + mask.Set(bit) + } + } + continue + case "container": + for name, bit := range bits { + if strings.Contains(name, "container") { + mask.Set(bit) + } + } + continue + } + + bit, ok := 
bits[strings.TrimSpace(name)] + if !ok { + return 0, fmt.Errorf("unknown event %q", name) + } + mask.Set(bit) + } + } + + return mask, nil +} + +// MustParseEventMask parses the given events, panic()ing on errors. +func MustParseEventMask(events ...string) EventMask { + mask, err := ParseEventMask(events...) + if err != nil { + panic(fmt.Sprintf("failed to parse events %s", strings.Join(events, " "))) + } + return mask +} + +// PrettyString returns a human-readable string representation of an EventMask. +func (m *EventMask) PrettyString() string { + names := map[Event]string{ + Event_RUN_POD_SANDBOX: "RunPodSandbox", + Event_STOP_POD_SANDBOX: "StopPodSandbox", + Event_REMOVE_POD_SANDBOX: "RemovePodSandbox", + Event_CREATE_CONTAINER: "CreateContainer", + Event_POST_CREATE_CONTAINER: "PostCreateContainer", + Event_START_CONTAINER: "StartContainer", + Event_POST_START_CONTAINER: "PostStartContainer", + Event_UPDATE_CONTAINER: "UpdateContainer", + Event_POST_UPDATE_CONTAINER: "PostUpdateContainer", + Event_STOP_CONTAINER: "StopContainer", + Event_REMOVE_CONTAINER: "RemoveContainer", + } + + mask := *m + events, sep := "", "" + + for bit := Event_UNKNOWN + 1; bit <= Event_LAST; bit++ { + if mask.IsSet(bit) { + events += sep + names[bit] + sep = "," + mask.Clear(bit) + } + } + + if mask != 0 { + events += sep + fmt.Sprintf("unknown(0x%x)", mask) + } + + return events +} + +// Set sets the given Events in the mask. +func (m *EventMask) Set(events ...Event) *EventMask { + for _, e := range events { + *m |= (1 << (e - 1)) + } + return m +} + +// Clear clears the given Events in the mask. +func (m *EventMask) Clear(events ...Event) *EventMask { + for _, e := range events { + *m &^= (1 << (e - 1)) + } + return m +} + +// IsSet check if the given Event is set in the mask. 
+func (m *EventMask) IsSet(e Event) bool { + return *m&(1<<(e-1)) != 0 +} diff --git a/vendor/github.com/containerd/nri/pkg/api/helpers.go b/vendor/github.com/containerd/nri/pkg/api/helpers.go new file mode 100644 index 0000000000..d25a443480 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/helpers.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +// DupStringSlice creates a copy of a string slice. +func DupStringSlice(in []string) []string { + if in == nil { + return nil + } + out := make([]string, len(in)) + copy(out, in) + return out +} + +// DupStringMap creates a copy of a map with string keys and values. +func DupStringMap(in map[string]string) map[string]string { + if in == nil { + return nil + } + out := map[string]string{} + for k, v := range in { + out[k] = v + } + return out +} + +// IsMarkedForRemoval checks if a key is marked for removal. +// +// The key can be an annotation name, a mount container path, a device path, +// or an environment variable name. These are all marked for removal in +// adjustments by preceding their corresponding key with a '-'. +func IsMarkedForRemoval(key string) (string, bool) { + if key == "" { + return "", false + } + if key[0] != '-' { + return key, false + } + return key[1:], true +} + +// MarkForRemoval returns a key marked for removal. 
+func MarkForRemoval(key string) string { + return "-" + key +} + +// ClearRemovalMarker returns a key cleared from any removal marker. +func ClearRemovalMarker(key string) string { + if key == "" { + return "" + } + if key[0] == '-' { + return key[1:] + } + return key +} diff --git a/vendor/github.com/containerd/nri/pkg/api/hooks.go b/vendor/github.com/containerd/nri/pkg/api/hooks.go new file mode 100644 index 0000000000..47dd96ea7c --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/hooks.go @@ -0,0 +1,103 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// Append appends the given hooks to the existing ones. +func (hooks *Hooks) Append(h *Hooks) *Hooks { + if h == nil { + return hooks + } + hooks.Prestart = append(hooks.Prestart, h.Prestart...) + hooks.CreateRuntime = append(hooks.CreateRuntime, h.CreateRuntime...) + hooks.CreateContainer = append(hooks.CreateContainer, h.CreateContainer...) + hooks.StartContainer = append(hooks.StartContainer, h.StartContainer...) + hooks.Poststart = append(hooks.Poststart, h.Poststart...) + hooks.Poststop = append(hooks.Poststop, h.Poststop...) + + return hooks +} + +// Hooks returns itself it any of its hooks is set. Otherwise it returns nil. 
+func (hooks *Hooks) Hooks() *Hooks { + if hooks == nil { + return nil + } + + if len(hooks.Prestart) > 0 { + return hooks + } + if len(hooks.CreateRuntime) > 0 { + return hooks + } + if len(hooks.CreateContainer) > 0 { + return hooks + } + if len(hooks.StartContainer) > 0 { + return hooks + } + if len(hooks.Poststart) > 0 { + return hooks + } + if len(hooks.Poststop) > 0 { + return hooks + } + + return nil +} + +// ToOCI returns the hook for an OCI runtime Spec. +func (h *Hook) ToOCI() rspec.Hook { + return rspec.Hook{ + Path: h.Path, + Args: DupStringSlice(h.Args), + Env: DupStringSlice(h.Env), + Timeout: h.Timeout.Get(), + } +} + +// FromOCIHooks returns hooks from an OCI runtime Spec. +func FromOCIHooks(o *rspec.Hooks) *Hooks { + if o == nil { + return nil + } + return &Hooks{ + Prestart: FromOCIHookSlice(o.Prestart), + CreateRuntime: FromOCIHookSlice(o.CreateRuntime), + CreateContainer: FromOCIHookSlice(o.CreateContainer), + StartContainer: FromOCIHookSlice(o.StartContainer), + Poststart: FromOCIHookSlice(o.Poststart), + Poststop: FromOCIHookSlice(o.Poststop), + } +} + +// FromOCIHookSlice returns a hook slice from an OCI runtime Spec. +func FromOCIHookSlice(o []rspec.Hook) []*Hook { + var hooks []*Hook + for _, h := range o { + hooks = append(hooks, &Hook{ + Path: h.Path, + Args: DupStringSlice(h.Args), + Env: DupStringSlice(h.Env), + Timeout: Int(h.Timeout), + }) + } + return hooks +} diff --git a/vendor/github.com/containerd/nri/pkg/api/mount.go b/vendor/github.com/containerd/nri/pkg/api/mount.go new file mode 100644 index 0000000000..e35bf5b36e --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/mount.go @@ -0,0 +1,88 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "sort" + + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // SELinuxRelabel is a Mount pseudo-option to request relabeling. + SELinuxRelabel = "relabel" +) + +// FromOCIMounts returns a Mount slice for an OCI runtime Spec. +func FromOCIMounts(o []rspec.Mount) []*Mount { + var mounts []*Mount + for _, m := range o { + mounts = append(mounts, &Mount{ + Destination: m.Destination, + Type: m.Type, + Source: m.Source, + Options: DupStringSlice(m.Options), + }) + } + return mounts +} + +// ToOCI returns a Mount for an OCI runtime Spec. +func (m *Mount) ToOCI(propagationQuery *string) rspec.Mount { + o := rspec.Mount{ + Destination: m.Destination, + Type: m.Type, + Source: m.Source, + } + for _, opt := range m.Options { + o.Options = append(o.Options, opt) + if propagationQuery != nil && (opt == "rprivate" || opt == "rshared" || opt == "rslave") { + *propagationQuery = opt + } + } + return o +} + +// Cmp returns true if the mounts are equal. +func (m *Mount) Cmp(v *Mount) bool { + if v == nil { + return false + } + if m.Destination != v.Destination || m.Type != v.Type || m.Source != v.Source || + len(m.Options) != len(v.Options) { + return false + } + + mOpts := make([]string, len(m.Options)) + vOpts := make([]string, len(m.Options)) + sort.Strings(mOpts) + sort.Strings(vOpts) + + for i, o := range mOpts { + if vOpts[i] != o { + return false + } + } + + return true +} + +// IsMarkedForRemoval checks if a Mount is marked for removal. 
+func (m *Mount) IsMarkedForRemoval() (string, bool) { + key, marked := IsMarkedForRemoval(m.Destination) + return key, marked +} diff --git a/vendor/github.com/containerd/nri/pkg/api/namespace.go b/vendor/github.com/containerd/nri/pkg/api/namespace.go new file mode 100644 index 0000000000..201106d33c --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/namespace.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// FromOCILinuxNamespaces returns a namespace slice from an OCI runtime Spec. +func FromOCILinuxNamespaces(o []rspec.LinuxNamespace) []*LinuxNamespace { + var namespaces []*LinuxNamespace + for _, ns := range o { + namespaces = append(namespaces, &LinuxNamespace{ + Type: string(ns.Type), + Path: ns.Path, + }) + } + return namespaces +} diff --git a/vendor/github.com/containerd/nri/pkg/api/optional.go b/vendor/github.com/containerd/nri/pkg/api/optional.go new file mode 100644 index 0000000000..c8020f45f8 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/optional.go @@ -0,0 +1,341 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "os" +) + +// +// XXX FIXME: +// +// The optional interface constructor should be updated/split up +// to avoid having to take an interface{} argument. Instead The +// optional types should have a +// - constructor taking the underlying native type +// - a Copy() function for copying them +// - a FromPointer constructor to create them from an optionally nil +// pointer to the underlying native type (to help constructing from +// structures that use a pointer to the native underlying type to +// denote optionality (OCI Spec mostly)) +// Creating from any other type should use one of these with any explicit +// cast for the argument as necessary. +// + +// String creates an Optional wrapper from its argument. +func String(v interface{}) *OptionalString { + var value string + + switch o := v.(type) { + case string: + value = o + case *string: + if o == nil { + return nil + } + value = *o + case *OptionalString: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalString{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalString) Get() *string { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// Int creates an Optional wrapper from its argument. 
+func Int(v interface{}) *OptionalInt { + var value int64 + + switch o := v.(type) { + case int: + value = int64(o) + case *int: + if o == nil { + return nil + } + value = int64(*o) + case *OptionalInt: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalInt{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalInt) Get() *int { + if o == nil { + return nil + } + v := int(o.Value) + return &v +} + +// Int32 creates an Optional wrapper from its argument. +func Int32(v interface{}) *OptionalInt32 { + var value int32 + + switch o := v.(type) { + case int32: + value = o + case *int32: + if o == nil { + return nil + } + value = *o + case *OptionalInt32: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalInt32{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalInt32) Get() *int32 { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// UInt32 creates an Optional wrapper from its argument. +func UInt32(v interface{}) *OptionalUInt32 { + var value uint32 + + switch o := v.(type) { + case uint32: + value = o + case *uint32: + if o == nil { + return nil + } + value = *o + case *OptionalUInt32: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalUInt32{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalUInt32) Get() *uint32 { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// Int64 creates an Optional wrapper from its argument. 
+func Int64(v interface{}) *OptionalInt64 { + var value int64 + + switch o := v.(type) { + case int: + value = int64(o) + case uint: + value = int64(o) + case uint64: + value = int64(o) + case int64: + value = o + case *int64: + if o == nil { + return nil + } + value = *o + case *uint64: + if o == nil { + return nil + } + value = int64(*o) + case *OptionalInt64: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalInt64{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalInt64) Get() *int64 { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// UInt64 creates an Optional wrapper from its argument. +func UInt64(v interface{}) *OptionalUInt64 { + var value uint64 + + switch o := v.(type) { + case int: + value = uint64(o) + case uint: + value = uint64(o) + case int64: + value = uint64(o) + case uint64: + value = o + case *int64: + if o == nil { + return nil + } + value = uint64(*o) + case *uint64: + if o == nil { + return nil + } + value = *o + case *OptionalUInt64: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalUInt64{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalUInt64) Get() *uint64 { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// Bool creates an Optional wrapper from its argument. +func Bool(v interface{}) *OptionalBool { + var value bool + + switch o := v.(type) { + case bool: + value = o + case *bool: + if o == nil { + return nil + } + value = *o + case *OptionalBool: + if o == nil { + return nil + } + value = o.Value + default: + return nil + } + + return &OptionalBool{ + Value: value, + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. 
+func (o *OptionalBool) Get() *bool { + if o == nil { + return nil + } + v := o.Value + return &v +} + +// FileMode creates an Optional wrapper from its argument. +func FileMode(v interface{}) *OptionalFileMode { + var value os.FileMode + + switch o := v.(type) { + case *os.FileMode: + if o == nil { + return nil + } + value = *o + case os.FileMode: + value = o + case *OptionalFileMode: + if o == nil { + return nil + } + value = os.FileMode(o.Value) + case uint32: + value = os.FileMode(o) + default: + return nil + } + + return &OptionalFileMode{ + Value: uint32(value), + } +} + +// Get returns nil if its value is unset or a pointer to the value itself. +func (o *OptionalFileMode) Get() *os.FileMode { + if o == nil { + return nil + } + v := os.FileMode(o.Value) + return &v +} diff --git a/vendor/github.com/containerd/nri/pkg/api/plugin.go b/vendor/github.com/containerd/nri/pkg/api/plugin.go new file mode 100644 index 0000000000..c4fe8fcf92 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/plugin.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "fmt" + "strings" +) + +const ( + // DefaultSocketPath is the default socket path for external plugins. + DefaultSocketPath = "/var/run/nri/nri.sock" + // PluginSocketEnvVar is used to inform plugins about pre-connected sockets. 
+ PluginSocketEnvVar = "NRI_PLUGIN_SOCKET" + // PluginNameEnvVar is used to inform NRI-launched plugins about their name. + PluginNameEnvVar = "NRI_PLUGIN_NAME" + // PluginIdxEnvVar is used to inform NRI-launched plugins about their ID. + PluginIdxEnvVar = "NRI_PLUGIN_IDX" +) + +// ParsePluginName parses the (file)name of a plugin into an index and a base. +func ParsePluginName(name string) (string, string, error) { + split := strings.SplitN(name, "-", 2) + if len(split) < 2 { + return "", "", fmt.Errorf("invalid plugin name %q, idx-pluginname expected", name) + } + + if err := CheckPluginIndex(split[0]); err != nil { + return "", "", err + } + + return split[0], split[1], nil +} + +// CheckPluginIndex checks the validity of a plugin index. +func CheckPluginIndex(idx string) error { + if len(idx) != 2 { + return fmt.Errorf("invalid plugin index %q, must be 2 digits", idx) + } + if !('0' <= idx[0] && idx[0] <= '9') || !('0' <= idx[1] && idx[1] <= '9') { + return fmt.Errorf("invalid plugin index %q (not [0-9][0-9])", idx) + } + return nil +} diff --git a/vendor/github.com/containerd/nri/pkg/api/resources.go b/vendor/github.com/containerd/nri/pkg/api/resources.go new file mode 100644 index 0000000000..8adcb5e775 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/resources.go @@ -0,0 +1,249 @@ +//go:build !tinygo.wasm + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package api + +import ( + rspec "github.com/opencontainers/runtime-spec/specs-go" + cri "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +// FromOCILinuxResources returns resources from an OCI runtime Spec. +func FromOCILinuxResources(o *rspec.LinuxResources, _ map[string]string) *LinuxResources { + if o == nil { + return nil + } + l := &LinuxResources{} + if m := o.Memory; m != nil { + l.Memory = &LinuxMemory{ + Limit: Int64(m.Limit), + Reservation: Int64(m.Reservation), + Swap: Int64(m.Swap), + Kernel: Int64(m.Kernel), + KernelTcp: Int64(m.KernelTCP), + Swappiness: UInt64(m.Swappiness), + DisableOomKiller: Bool(m.DisableOOMKiller), + UseHierarchy: Bool(m.UseHierarchy), + } + } + if c := o.CPU; c != nil { + l.Cpu = &LinuxCPU{ + Shares: UInt64(c.Shares), + Quota: Int64(c.Quota), + Period: UInt64(c.Period), + RealtimeRuntime: Int64(c.RealtimeRuntime), + RealtimePeriod: UInt64(c.RealtimePeriod), + Cpus: c.Cpus, + Mems: c.Mems, + } + } + for _, h := range o.HugepageLimits { + l.HugepageLimits = append(l.HugepageLimits, &HugepageLimit{ + PageSize: h.Pagesize, + Limit: h.Limit, + }) + } + for _, d := range o.Devices { + l.Devices = append(l.Devices, &LinuxDeviceCgroup{ + Allow: d.Allow, + Type: d.Type, + Major: Int64(d.Major), + Minor: Int64(d.Minor), + Access: d.Access, + }) + } + if p := o.Pids; p != nil { + l.Pids = &LinuxPids{ + Limit: p.Limit, + } + } + return l +} + +func FromCRILinuxResources(c *cri.LinuxContainerResources) *LinuxResources { + if c == nil { + return nil + } + shares, quota, period := uint64(c.CpuShares), c.CpuQuota, uint64(c.CpuPeriod) + r := &LinuxResources{ + Cpu: &LinuxCPU{ + Shares: UInt64(&shares), + Quota: Int64("a), + Period: UInt64(&period), + Cpus: c.CpusetCpus, + Mems: c.CpusetMems, + }, + Memory: &LinuxMemory{ + Limit: Int64(&c.MemoryLimitInBytes), + }, + } + for _, l := range c.HugepageLimits { + r.HugepageLimits = append(r.HugepageLimits, + &HugepageLimit{ + PageSize: l.PageSize, + Limit: l.Limit, + }) + } + return r +} + +// ToOCI 
returns resources for an OCI runtime Spec. +func (r *LinuxResources) ToOCI() *rspec.LinuxResources { + if r == nil { + return nil + } + o := &rspec.LinuxResources{ + CPU: &rspec.LinuxCPU{}, + Memory: &rspec.LinuxMemory{}, + } + if r.Memory != nil { + o.Memory = &rspec.LinuxMemory{ + Limit: r.Memory.Limit.Get(), + Reservation: r.Memory.Reservation.Get(), + Swap: r.Memory.Swap.Get(), + Kernel: r.Memory.Kernel.Get(), + KernelTCP: r.Memory.KernelTcp.Get(), + Swappiness: r.Memory.Swappiness.Get(), + DisableOOMKiller: r.Memory.DisableOomKiller.Get(), + UseHierarchy: r.Memory.UseHierarchy.Get(), + } + } + if r.Cpu != nil { + o.CPU = &rspec.LinuxCPU{ + Shares: r.Cpu.Shares.Get(), + Quota: r.Cpu.Quota.Get(), + Period: r.Cpu.Period.Get(), + RealtimeRuntime: r.Cpu.RealtimeRuntime.Get(), + RealtimePeriod: r.Cpu.RealtimePeriod.Get(), + Cpus: r.Cpu.Cpus, + Mems: r.Cpu.Mems, + } + } + for _, l := range r.HugepageLimits { + o.HugepageLimits = append(o.HugepageLimits, rspec.LinuxHugepageLimit{ + Pagesize: l.PageSize, + Limit: l.Limit, + }) + } + if len(r.Unified) != 0 { + o.Unified = make(map[string]string) + for k, v := range r.Unified { + o.Unified[k] = v + } + } + for _, d := range r.Devices { + o.Devices = append(o.Devices, rspec.LinuxDeviceCgroup{ + Allow: d.Allow, + Type: d.Type, + Major: d.Major.Get(), + Minor: d.Minor.Get(), + Access: d.Access, + }) + } + if r.Pids != nil { + o.Pids = &rspec.LinuxPids{ + Limit: r.Pids.Limit, + } + } + return o +} + +// ToCRI returns resources for CRI. 
+func (r *LinuxResources) ToCRI(oomScoreAdj int64) *cri.LinuxContainerResources { + if r == nil { + return nil + } + o := &cri.LinuxContainerResources{} + if r.Memory != nil { + o.MemoryLimitInBytes = r.Memory.GetLimit().GetValue() + o.OomScoreAdj = oomScoreAdj + } + if r.Cpu != nil { + o.CpuShares = int64(r.Cpu.GetShares().GetValue()) + o.CpuPeriod = int64(r.Cpu.GetPeriod().GetValue()) + o.CpuQuota = r.Cpu.GetQuota().GetValue() + o.CpusetCpus = r.Cpu.Cpus + o.CpusetMems = r.Cpu.Mems + } + for _, l := range r.HugepageLimits { + o.HugepageLimits = append(o.HugepageLimits, &cri.HugepageLimit{ + PageSize: l.PageSize, + Limit: l.Limit, + }) + } + if len(r.Unified) != 0 { + o.Unified = make(map[string]string) + for k, v := range r.Unified { + o.Unified[k] = v + } + } + + return o +} + +// Copy creates a copy of the resources. +func (r *LinuxResources) Copy() *LinuxResources { + if r == nil { + return nil + } + o := &LinuxResources{} + if r.Memory != nil { + o.Memory = &LinuxMemory{ + Limit: Int64(r.Memory.GetLimit()), + Reservation: Int64(r.Memory.GetReservation()), + Swap: Int64(r.Memory.GetSwap()), + Kernel: Int64(r.Memory.GetKernel()), + KernelTcp: Int64(r.Memory.GetKernelTcp()), + Swappiness: UInt64(r.Memory.GetSwappiness()), + DisableOomKiller: Bool(r.Memory.GetDisableOomKiller()), + UseHierarchy: Bool(r.Memory.GetUseHierarchy()), + } + } + if r.Cpu != nil { + o.Cpu = &LinuxCPU{ + Shares: UInt64(r.Cpu.GetShares()), + Quota: Int64(r.Cpu.GetQuota()), + Period: UInt64(r.Cpu.GetPeriod()), + RealtimeRuntime: Int64(r.Cpu.GetRealtimeRuntime()), + RealtimePeriod: UInt64(r.Cpu.GetRealtimePeriod()), + Cpus: r.Cpu.GetCpus(), + Mems: r.Cpu.GetMems(), + } + } + for _, l := range r.HugepageLimits { + o.HugepageLimits = append(o.HugepageLimits, &HugepageLimit{ + PageSize: l.PageSize, + Limit: l.Limit, + }) + } + if len(r.Unified) != 0 { + o.Unified = make(map[string]string) + for k, v := range r.Unified { + o.Unified[k] = v + } + } + if r.Pids != nil { + o.Pids = &LinuxPids{ + 
Limit: r.Pids.Limit, + } + } + o.BlockioClass = String(r.BlockioClass) + o.RdtClass = String(r.RdtClass) + + return o +} diff --git a/vendor/github.com/containerd/nri/pkg/api/timeouts.go b/vendor/github.com/containerd/nri/pkg/api/timeouts.go new file mode 100644 index 0000000000..3d056ae5ea --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/timeouts.go @@ -0,0 +1,28 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +import ( + "time" +) + +const ( + // DefaultPluginRegistrationTimeout is the default timeout for plugin registration. + DefaultPluginRegistrationTimeout = 5 * time.Second + // DefaultPluginRequestTimeout is the default timeout for plugins to handle a request. + DefaultPluginRequestTimeout = 2 * time.Second +) diff --git a/vendor/github.com/containerd/nri/pkg/api/update.go b/vendor/github.com/containerd/nri/pkg/api/update.go new file mode 100644 index 0000000000..04ac3925d5 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/api/update.go @@ -0,0 +1,199 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package api + +//nolint +// SetContainerId sets the id of the container to update. +func (u *ContainerUpdate) SetContainerId(id string) { + u.ContainerId = id +} + +// SetLinuxMemoryLimit records setting the memory limit for a container. +func (u *ContainerUpdate) SetLinuxMemoryLimit(value int64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.Limit = Int64(value) +} + +// SetLinuxMemoryReservation records setting the memory reservation for a container. +func (u *ContainerUpdate) SetLinuxMemoryReservation(value int64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.Reservation = Int64(value) +} + +// SetLinuxMemorySwap records records setting the memory swap limit for a container. +func (u *ContainerUpdate) SetLinuxMemorySwap(value int64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.Swap = Int64(value) +} + +// SetLinuxMemoryKernel records setting the memory kernel limit for a container. +func (u *ContainerUpdate) SetLinuxMemoryKernel(value int64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.Kernel = Int64(value) +} + +// SetLinuxMemoryKernelTCP records setting the memory kernel TCP limit for a container. +func (u *ContainerUpdate) SetLinuxMemoryKernelTCP(value int64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.KernelTcp = Int64(value) +} + +// SetLinuxMemorySwappiness records setting the memory swappiness for a container. 
+func (u *ContainerUpdate) SetLinuxMemorySwappiness(value uint64) { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.Swappiness = UInt64(value) +} + +// SetLinuxMemoryDisableOomKiller records disabling the OOM killer for a container. +func (u *ContainerUpdate) SetLinuxMemoryDisableOomKiller() { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.DisableOomKiller = Bool(true) +} + +// SetLinuxMemoryUseHierarchy records enabling hierarchical memory accounting for a container. +func (u *ContainerUpdate) SetLinuxMemoryUseHierarchy() { + u.initLinuxResourcesMemory() + u.Linux.Resources.Memory.UseHierarchy = Bool(true) +} + +// SetLinuxCPUShares records setting the scheduler's CPU shares for a container. +func (u *ContainerUpdate) SetLinuxCPUShares(value uint64) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.Shares = UInt64(value) +} + +// SetLinuxCPUQuota records setting the scheduler's CPU quota for a container. +func (u *ContainerUpdate) SetLinuxCPUQuota(value int64) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.Quota = Int64(value) +} + +// SetLinuxCPUPeriod records setting the scheduler's CPU period for a container. +func (u *ContainerUpdate) SetLinuxCPUPeriod(value int64) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.Period = UInt64(value) +} + +// SetLinuxCPURealtimeRuntime records setting the scheduler's realtime runtime for a container. +func (u *ContainerUpdate) SetLinuxCPURealtimeRuntime(value int64) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.RealtimeRuntime = Int64(value) +} + +// SetLinuxCPURealtimePeriod records setting the scheduler's realtime period for a container. +func (u *ContainerUpdate) SetLinuxCPURealtimePeriod(value uint64) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.RealtimePeriod = UInt64(value) +} + +// SetLinuxCPUSetCPUs records setting the cpuset CPUs for a container. 
+func (u *ContainerUpdate) SetLinuxCPUSetCPUs(value string) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.Cpus = value +} + +// SetLinuxCPUSetMems records setting the cpuset memory for a container. +func (u *ContainerUpdate) SetLinuxCPUSetMems(value string) { + u.initLinuxResourcesCPU() + u.Linux.Resources.Cpu.Mems = value +} + +// SetLinuxPidLimits records setting the pid max number for a container. +func (u *ContainerUpdate) SetLinuxPidLimits(value int64) { + u.initLinuxResourcesPids() + u.Linux.Resources.Pids.Limit = value +} + +// AddLinuxHugepageLimit records adding a hugepage limit for a container. +func (u *ContainerUpdate) AddLinuxHugepageLimit(pageSize string, value uint64) { + u.initLinuxResources() + u.Linux.Resources.HugepageLimits = append(u.Linux.Resources.HugepageLimits, + &HugepageLimit{ + PageSize: pageSize, + Limit: value, + }) +} + +// SetLinuxBlockIOClass records setting the Block I/O class for a container. +func (u *ContainerUpdate) SetLinuxBlockIOClass(value string) { + u.initLinuxResources() + u.Linux.Resources.BlockioClass = String(value) +} + +// SetLinuxRDTClass records setting the RDT class for a container. +func (u *ContainerUpdate) SetLinuxRDTClass(value string) { + u.initLinuxResources() + u.Linux.Resources.RdtClass = String(value) +} + +// AddLinuxUnified sets a cgroupv2 unified resource. +func (u *ContainerUpdate) AddLinuxUnified(key, value string) { + u.initLinuxResourcesUnified() + u.Linux.Resources.Unified[key] = value +} + +// SetIgnoreFailure marks an Update as ignored for failures. +// Such updates will not prevent the related container operation +// from succeeding if the update fails. +func (u *ContainerUpdate) SetIgnoreFailure() { + u.IgnoreFailure = true +} + +// +// Initializing a container update. 
+// + +func (u *ContainerUpdate) initLinux() { + if u.Linux == nil { + u.Linux = &LinuxContainerUpdate{} + } +} + +func (u *ContainerUpdate) initLinuxResources() { + u.initLinux() + if u.Linux.Resources == nil { + u.Linux.Resources = &LinuxResources{} + } +} + +func (u *ContainerUpdate) initLinuxResourcesMemory() { + u.initLinuxResources() + if u.Linux.Resources.Memory == nil { + u.Linux.Resources.Memory = &LinuxMemory{} + } +} + +func (u *ContainerUpdate) initLinuxResourcesCPU() { + u.initLinuxResources() + if u.Linux.Resources.Cpu == nil { + u.Linux.Resources.Cpu = &LinuxCPU{} + } +} + +func (u *ContainerUpdate) initLinuxResourcesUnified() { + u.initLinuxResources() + if u.Linux.Resources.Unified == nil { + u.Linux.Resources.Unified = make(map[string]string) + } +} + +func (u *ContainerUpdate) initLinuxResourcesPids() { + u.initLinuxResources() + if u.Linux.Resources.Pids == nil { + u.Linux.Resources.Pids = &LinuxPids{} + } +} diff --git a/vendor/github.com/containerd/nri/pkg/log/log.go b/vendor/github.com/containerd/nri/pkg/log/log.go new file mode 100644 index 0000000000..91337b59ef --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/log/log.go @@ -0,0 +1,87 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/sirupsen/logrus" +) + +var ( + log Logger = &fallbackLogger{} +) + +// Logger is the interface NRI uses for logging. 
+type Logger interface { + Debugf(ctx context.Context, format string, args ...interface{}) + Infof(ctx context.Context, format string, args ...interface{}) + Warnf(ctx context.Context, format string, args ...interface{}) + Errorf(ctx context.Context, format string, args ...interface{}) +} + +// Set the logger used by NRI. +func Set(l Logger) { + log = l +} + +// Get the logger used by NRI. +func Get() Logger { + return log +} + +// Debugf logs a formatted debug message. +func Debugf(ctx context.Context, format string, args ...interface{}) { + log.Debugf(ctx, format, args...) +} + +// Infof logs a formatted informational message. +func Infof(ctx context.Context, format string, args ...interface{}) { + log.Infof(ctx, format, args...) +} + +// Warnf logs a formatted warning message. +func Warnf(ctx context.Context, format string, args ...interface{}) { + log.Warnf(ctx, format, args...) +} + +// Errorf logs a formatted error message. +func Errorf(ctx context.Context, format string, args ...interface{}) { + log.Errorf(ctx, format, args...) +} + +type fallbackLogger struct{} + +// Debugf logs a formatted debug message. +func (f *fallbackLogger) Debugf(ctx context.Context, format string, args ...interface{}) { + logrus.WithContext(ctx).Debugf(format, args...) +} + +// Infof logs a formatted informational message. +func (f *fallbackLogger) Infof(ctx context.Context, format string, args ...interface{}) { + logrus.WithContext(ctx).Infof(format, args...) +} + +// Warnf logs a formatted warning message. +func (f *fallbackLogger) Warnf(ctx context.Context, format string, args ...interface{}) { + logrus.WithContext(ctx).Warnf(format, args...) +} + +// Errorf logs a formatted error message. +func (f *fallbackLogger) Errorf(ctx context.Context, format string, args ...interface{}) { + logrus.WithContext(ctx).Errorf(format, args...) 
+} diff --git a/vendor/github.com/containerd/nri/pkg/net/conn.go b/vendor/github.com/containerd/nri/pkg/net/conn.go new file mode 100644 index 0000000000..db7d2bc626 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/conn.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package net + +import ( + "fmt" + "io" + "net" + "os" + "strconv" + "sync" +) + +// NewFdConn creates a net.Conn for the given (socket) fd. +func NewFdConn(fd int) (net.Conn, error) { + f := os.NewFile(uintptr(fd), "fd #"+strconv.Itoa(fd)) + + conn, err := net.FileConn(f) + if err != nil { + return nil, fmt.Errorf("failed to create net.Conn for fd #%d: %w", fd, err) + } + f.Close() + + return conn, nil +} + +// connListener wraps a pre-connected socket in a net.Listener. +type connListener struct { + next chan net.Conn + conn net.Conn + addr net.Addr + lock sync.RWMutex // for Close() + closed bool +} + +// NewConnListener wraps an existing net.Conn in a net.Listener. +// +// The first call to Accept() on the listener will return the wrapped +// connection. Subsequent calls to Accept() block until the listener +// is closed, then return io.EOF. Close() closes the listener and the +// wrapped connection. 
+func NewConnListener(conn net.Conn) net.Listener { + next := make(chan net.Conn, 1) + next <- conn + + return &connListener{ + next: next, + conn: conn, + addr: conn.LocalAddr(), + } +} + +// Accept returns the wrapped connection when it is called the first +// time. Later calls to Accept block until the listener is closed, then +// return io.EOF. +func (l *connListener) Accept() (net.Conn, error) { + conn := <-l.next + if conn == nil { + return nil, io.EOF + } + return conn, nil +} + +// Close closes the listener and the wrapped connection. +func (l *connListener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closed { + return nil + } + close(l.next) + l.closed = true + return l.conn.Close() +} + +// Addr returns the local address of the wrapped connection. +func (l *connListener) Addr() net.Addr { + return l.addr +} diff --git a/vendor/github.com/containerd/nri/pkg/net/multiplex/mux.go b/vendor/github.com/containerd/nri/pkg/net/multiplex/mux.go new file mode 100644 index 0000000000..dc6ba50067 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/multiplex/mux.go @@ -0,0 +1,460 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package multiplex + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "sync" + "syscall" + "time" + + nrinet "github.com/containerd/nri/pkg/net" + "github.com/containerd/ttrpc" +) + +// Mux multiplexes several logical connections over a single net.Conn. 
+// +// Connections are identified within a Mux by ConnIDs which are simple +// 32-bit unsigned integers. Opening a connection returns a net.Conn +// corrponding to the ConnID. This can then be used to write and read +// data through the connection with the Mux performing multiplexing +// and demultiplexing of data. +// +// Writing to a connection is fully synchronous. The caller can safely +// reuse the buffer once the call returns. Reading from a connection +// returns the oldest demultiplexed buffer for the connection, blocking +// if the connections incoming queue is empty. If any incoming queue is +// ever overflown the underlying trunk and all multiplexed connections +// are closed and an error is recorded. This error is later returned by +// any subsequent read from any connection. All connections of the Mux +// have the same fixed incoming queue length which can be configured +// using the WithReadQueueLength Option during Mux creation. +// +// The Mux interface also provides functions that emulate net.Dial and +// net.Listen for a connection. Usually these can be used for passing +// multiplexed connections to packages that insist to Dial or Accept +// themselves for connection establishment. +// +// Note that opening a connection is a virtual operation in the sense +// that it has no effects outside the Mux. It is performed without any +// signalling or other communication. It merely acquires the net.Conn +// corresponding to the connection and blindly assumes that the same +// ConnID is or will be opened at the other end of the Mux. +type Mux interface { + // Open the connection for the given ConnID. + Open(ConnID) (net.Conn, error) + + // Close the Mux and all connections associated with it. + Close() error + + // Dialer returns a net.Dial-like function for the connection. + // + // Calling the returned function (with arguments) will return a + // net.Conn for the connection. 
+ Dialer(ConnID) func(string, string) (net.Conn, error) + + // Listener returns a net.Listener for the connection. The first + // call to Accept() on the listener will return a net.Conn for the + // connection. Subsequent calls to Accept() will block until the + // connection is closed then return io.EOF. + Listen(ConnID) (net.Listener, error) + + // Trunk returns the trunk connection for the Mux. + Trunk() net.Conn + + // Unblock unblocks the Mux reader. + Unblock() +} + +// ConnID uniquely identifies a logical connection within a Mux. +type ConnID uint32 + +const ( + // ConnID 0 is reserved for future use. + reservedConnID ConnID = iota + // LowestConnID is the lowest externally usable ConnID. + LowestConnID +) + +// Option to apply to a Mux. +type Option func(*mux) + +// WithBlockedRead causes the Mux to be blocked for reading until gets Unblock()'ed. +func WithBlockedRead() Option { + return func(m *mux) { + if m.blockC == nil { + m.blockC = make(chan struct{}) + } + } +} + +// WithReadQueueLength overrides the default read queue size. +func WithReadQueueLength(length int) Option { + return func(m *mux) { + m.qlen = length + } +} + +// Multiplex returns a multiplexer for the given connection. +func Multiplex(trunk net.Conn, options ...Option) Mux { + return newMux(trunk, options...) +} + +// mux is our implementation of Mux. +type mux struct { + trunk net.Conn + writeLock sync.Mutex + conns map[ConnID]*conn + connLock sync.RWMutex + qlen int + errOnce sync.Once + err error + unblkOnce sync.Once + blockC chan struct{} + closeOnce sync.Once + doneC chan struct{} +} + +const ( + // default read queue length for a single connection + readQueueLen = 256 + // length of frame header: 4-byte ConnID, 4-byte payload length + headerLen = 8 + // max. allowed payload size + maxPayloadSize = ttrpcMessageHeaderLength + ttrpcMessageLengthMax +) + +// conn represents a single multiplexed connection. 
+type conn struct { + id ConnID + mux *mux + readC chan []byte + closeOnce sync.Once + doneC chan error +} + +func newMux(trunk net.Conn, options ...Option) *mux { + m := &mux{ + trunk: trunk, + conns: make(map[ConnID]*conn), + qlen: readQueueLen, + doneC: make(chan struct{}), + } + + for _, o := range options { + o(m) + } + + if m.blockC == nil { + WithBlockedRead()(m) + m.Unblock() + } + + go m.reader() + + return m +} + +func (m *mux) Trunk() net.Conn { + return m.trunk +} + +func (m *mux) Unblock() { + m.unblkOnce.Do(func() { + close(m.blockC) + }) +} + +func (m *mux) Open(id ConnID) (net.Conn, error) { + if id == reservedConnID { + return nil, fmt.Errorf("ConnID %d is reserved", id) + } + + m.connLock.Lock() + defer m.connLock.Unlock() + + c, ok := m.conns[id] + if !ok { + c = &conn{ + id: id, + mux: m, + doneC: make(chan error, 1), + readC: make(chan []byte, m.qlen), + } + m.conns[id] = c + } + + return c, nil +} + +func (m *mux) Close() error { + m.closeOnce.Do(func() { + m.connLock.Lock() + defer m.connLock.Unlock() + for _, conn := range m.conns { + conn.close() + } + close(m.doneC) + m.trunk.Close() + }) + + return nil +} + +func (m *mux) Dialer(id ConnID) func(string, string) (net.Conn, error) { + return func(string, string) (net.Conn, error) { + return m.Open(id) + } +} + +func (m *mux) Listen(id ConnID) (net.Listener, error) { + conn, err := m.Open(id) + if err != nil { + return nil, err + } + return nrinet.NewConnListener(conn), nil +} + +func (m *mux) write(id ConnID, buf []byte) (int, error) { + var ( + hdr [headerLen]byte + data = buf[:] + size = len(data) + ) + + m.writeLock.Lock() + defer m.writeLock.Unlock() + + for { + if size > maxPayloadSize { + size = maxPayloadSize + } + + binary.BigEndian.PutUint32(hdr[0:4], uint32(id)) + binary.BigEndian.PutUint32(hdr[4:8], uint32(size)) + + n, err := m.trunk.Write(hdr[:]) + if err != nil { + err = fmt.Errorf("failed to write header to trunk: %w", err) + if n != 0 { + m.setError(err) + m.Close() + } + 
return 0, err + } + + n, err = m.trunk.Write(data[:size]) + if err != nil { + err = fmt.Errorf("failed to write payload to trunk: %w", err) + if n != 0 { + m.setError(err) + m.Close() + } + return 0, err + } + + data = data[size:] + if size > len(data) { + size = len(data) + } + + if size == 0 { + break + } + } + + return len(buf), nil +} + +func (m *mux) reader() { + var ( + hdr [headerLen]byte + cid uint32 + cnt uint32 + buf []byte + err error + ) + + <-m.blockC + + for { + select { + case <-m.doneC: + return + default: + } + + _, err = io.ReadFull(m.trunk, hdr[:]) + if err != nil { + switch { + case errors.Is(err, io.EOF): + case errors.Is(err, ttrpc.ErrClosed): + err = io.EOF + case errors.Is(err, ttrpc.ErrServerClosed): + err = io.EOF + case errors.Is(err, net.ErrClosed): + err = io.EOF + default: + err = fmt.Errorf("failed to read header from trunk: %w", err) + } + m.setError(err) + m.Close() + return + } + + cid = binary.BigEndian.Uint32(hdr[0:4]) + cnt = binary.BigEndian.Uint32(hdr[4:8]) + buf = make([]byte, int(cnt)) + + _, err = io.ReadFull(m.trunk, buf) + if err != nil { + switch { + case errors.Is(err, io.EOF): + case errors.Is(err, ttrpc.ErrClosed): + err = io.EOF + case errors.Is(err, ttrpc.ErrServerClosed): + err = io.EOF + case errors.Is(err, net.ErrClosed): + err = io.EOF + default: + err = fmt.Errorf("failed to read payload from trunk: %w", err) + } + m.setError(err) + m.Close() + return + } + + m.connLock.RLock() + conn, ok := m.conns[ConnID(cid)] + m.connLock.RUnlock() + if ok { + select { + case conn.readC <- buf: + default: + m.setError(errors.New("failed to queue payload for reading")) + m.Close() + return + } + } + } +} + +func (m *mux) setError(err error) { + m.errOnce.Do(func() { + m.err = err + }) +} + +// nolint +func (m *mux) error() error { + m.errOnce.Do(func() { + if m.err == nil { + m.err = io.EOF + } + }) + return m.err +} + +// +// multiplexed connections +// + +// Reads reads the next message from the multiplexed connection. 
+func (c *conn) Read(buf []byte) (int, error) { + var ( + msg []byte + err error + ok bool + ) + + select { + case err, ok = <-c.doneC: + if !ok || err == nil { + err = c.mux.error() + } + return 0, err + case msg, ok = <-c.readC: + if !ok { + return 0, c.mux.error() + } + if cap(buf) < len(msg) { + return 0, syscall.ENOMEM + } + } + + copy(buf, msg) + return len(msg), nil +} + +// Write writes the given data to the multiplexed connection. +func (c *conn) Write(b []byte) (int, error) { + select { + case err := <-c.doneC: + if err == nil { + err = io.EOF + } + return 0, err + default: + } + return c.mux.write(c.id, b) +} + +// Close closes the multiplexed connection. +func (c *conn) Close() error { + c.mux.connLock.Lock() + defer c.mux.connLock.Unlock() + if c.mux.conns[c.id] == c { + delete(c.mux.conns, c.id) + } + return c.close() +} + +func (c *conn) close() error { + c.closeOnce.Do(func() { + close(c.doneC) + }) + return nil +} + +// LocalAddr is the unimplemented stub for the corresponding net.Conn function. +func (c *conn) LocalAddr() net.Addr { + return nil +} + +// RemoteAddr is the unimplemented stub for the corresponding net.Conn function. +func (c *conn) RemoteAddr() net.Addr { + return nil +} + +// SetDeadline is the unimplemented stub for the corresponding net.Conn function. +func (c *conn) SetDeadline(_ time.Time) error { + return nil +} + +// SetReadDeadline is the unimplemented stub for the corresponding net.Conn function. +func (c *conn) SetReadDeadline(_ time.Time) error { + return nil +} + +// SetWriteDeadline is the unimplemented stub for the corresponding net.Conn function. 
+func (c *conn) SetWriteDeadline(_ time.Time) error { + return nil +} diff --git a/vendor/github.com/containerd/nri/pkg/net/multiplex/ttrpc.go b/vendor/github.com/containerd/nri/pkg/net/multiplex/ttrpc.go new file mode 100644 index 0000000000..a195c5aa61 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/multiplex/ttrpc.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package multiplex + +const ( + // PluginServiceConn is the mux connection ID for NRI plugin services. + PluginServiceConn ConnID = iota + 1 + // RuntimeServiceConn is the mux connection ID for NRI runtime services. + RuntimeServiceConn +) + +const ( + ttrpcMessageHeaderLength = 10 + ttrpcMessageLengthMax = 4 << 20 +) diff --git a/vendor/github.com/containerd/nri/pkg/net/socketpair.go b/vendor/github.com/containerd/nri/pkg/net/socketpair.go new file mode 100644 index 0000000000..620887db41 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/socketpair.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" + "os" +) + +// SocketPair contains the os.File of a connected pair of sockets. +type SocketPair struct { + local, peer *os.File +} + +// NewSocketPair returns a connected pair of sockets. +func NewSocketPair() (SocketPair, error) { + fds, err := newSocketPairCLOEXEC() + if err != nil { + return SocketPair{nil, nil}, fmt.Errorf("failed to create socketpair: %w", err) + } + + filename := fmt.Sprintf("socketpair-#%d:%d", fds[0], fds[1]) + + return SocketPair{ + os.NewFile(uintptr(fds[0]), filename+"[0]"), + os.NewFile(uintptr(fds[1]), filename+"[1]"), + }, nil +} + +// LocalFile returns the local end of the socketpair as an *os.File. +func (sp SocketPair) LocalFile() *os.File { + return sp.local +} + +// PeerFile returns the peer end of the socketpair as an *os.File. +func (sp SocketPair) PeerFile() *os.File { + return sp.peer +} + +// LocalConn returns a net.Conn for the local end of the socketpair. +// This closes LocalFile(). +func (sp SocketPair) LocalConn() (net.Conn, error) { + file := sp.LocalFile() + defer file.Close() + conn, err := net.FileConn(file) + if err != nil { + return nil, fmt.Errorf("failed to create net.Conn for %s: %w", file.Name(), err) + } + return conn, nil +} + +// PeerConn returns a net.Conn for the peer end of the socketpair. +// This closes PeerFile(). 
+func (sp SocketPair) PeerConn() (net.Conn, error) { + file := sp.PeerFile() + defer file.Close() + conn, err := net.FileConn(file) + if err != nil { + return nil, fmt.Errorf("failed to create net.Conn for %s: %w", file.Name(), err) + } + return conn, nil +} + +// Close closes both ends of the socketpair. +func (sp SocketPair) Close() { + sp.LocalClose() + sp.PeerClose() +} + +// LocalClose closes the local end of the socketpair. +func (sp SocketPair) LocalClose() { + sp.local.Close() +} + +// PeerClose closes the peer end of the socketpair. +func (sp SocketPair) PeerClose() { + sp.peer.Close() +} diff --git a/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_linux.go b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_linux.go new file mode 100644 index 0000000000..0ca83f68a6 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_linux.go @@ -0,0 +1,27 @@ +//go:build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package net + +import ( + "golang.org/x/sys/unix" +) + +func newSocketPairCLOEXEC() ([2]int, error) { + return unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0) +} diff --git a/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_unix.go b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_unix.go new file mode 100644 index 0000000000..ed3c2f992d --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_unix.go @@ -0,0 +1,38 @@ +//go:build !linux && !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package net + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func newSocketPairCLOEXEC() ([2]int, error) { + syscall.ForkLock.RLock() + defer syscall.ForkLock.RUnlock() + fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0) + if err != nil { + return fds, err + } + unix.CloseOnExec(fds[0]) + unix.CloseOnExec(fds[1]) + + return fds, err +} diff --git a/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_windows.go b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_windows.go new file mode 100644 index 0000000000..033cf65e48 --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/net/socketpair_cloexec_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package net + +import ( + "errors" + + sys "golang.org/x/sys/windows" +) + +func newSocketPairCLOEXEC() ([2]sys.Handle, error) { + // when implementing do use WSA_FLAG_NO_HANDLE_INHERIT to avoid leaking FDs + return [2]sys.Handle{sys.InvalidHandle, sys.InvalidHandle}, errors.New("newSocketPairCLOEXEC unimplemented for windows") +} diff --git a/vendor/github.com/containerd/nri/pkg/stub/stub.go b/vendor/github.com/containerd/nri/pkg/stub/stub.go new file mode 100644 index 0000000000..6915e3b7eb --- /dev/null +++ b/vendor/github.com/containerd/nri/pkg/stub/stub.go @@ -0,0 +1,868 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package stub + +import ( + "context" + "errors" + "fmt" + stdnet "net" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/containerd/nri/pkg/api" + nrilog "github.com/containerd/nri/pkg/log" + "github.com/containerd/nri/pkg/net" + "github.com/containerd/nri/pkg/net/multiplex" + "github.com/containerd/ttrpc" +) + +// Plugin can implement a number of interfaces related to Pod and Container +// lifecycle events. No any single such interface is mandatory, therefore the +// Plugin interface itself is empty. Plugins are required to implement at +// least one of these interfaces and this is verified during stub creation. +// Trying to create a stub for a plugin violating this requirement will fail +// with and error. +type Plugin interface{} + +// ConfigureInterface handles Configure API request. +type ConfigureInterface interface { + // Configure the plugin with the given NRI-supplied configuration. + // If a non-zero EventMask is returned, the plugin will be subscribed + // to the corresponding. + Configure(ctx context.Context, config, runtime, version string) (api.EventMask, error) +} + +// SynchronizeInterface handles Synchronize API requests. +type SynchronizeInterface interface { + // Synchronize the state of the plugin with the runtime. + // The plugin can request updates to containers in response. + Synchronize(context.Context, []*api.PodSandbox, []*api.Container) ([]*api.ContainerUpdate, error) +} + +// ShutdownInterface handles a Shutdown API request. +type ShutdownInterface interface { + // Shutdown notifies the plugin about the runtime shutting down. + Shutdown(context.Context) +} + +// RunPodInterface handles RunPodSandbox API events. +type RunPodInterface interface { + // RunPodSandbox relays a RunPodSandbox event to the plugin. + RunPodSandbox(context.Context, *api.PodSandbox) error +} + +// StopPodInterface handles StopPodSandbox API events. 
+type StopPodInterface interface { + // StopPodSandbox relays a StopPodSandbox event to the plugin. + StopPodSandbox(context.Context, *api.PodSandbox) error +} + +// RemovePodInterface handles RemovePodSandbox API events. +type RemovePodInterface interface { + // RemovePodSandbox relays a RemovePodSandbox event to the plugin. + RemovePodSandbox(context.Context, *api.PodSandbox) error +} + +// CreateContainerInterface handles CreateContainer API requests. +type CreateContainerInterface interface { + // CreateContainer relays a CreateContainer request to the plugin. + // The plugin can request adjustments to the container being created + // and updates to other unstopped containers in response. + CreateContainer(context.Context, *api.PodSandbox, *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) +} + +// StartContainerInterface handles StartContainer API requests. +type StartContainerInterface interface { + // StartContainer relays a StartContainer event to the plugin. + StartContainer(context.Context, *api.PodSandbox, *api.Container) error +} + +// UpdateContainerInterface handles UpdateContainer API requests. +type UpdateContainerInterface interface { + // UpdateContainer relays an UpdateContainer request to the plugin. + // The plugin can request updates both to the container being updated + // (which then supersedes the original update) and to other unstopped + // containers in response. + UpdateContainer(context.Context, *api.PodSandbox, *api.Container, *api.LinuxResources) ([]*api.ContainerUpdate, error) +} + +// StopContainerInterface handles StopContainer API requests. +type StopContainerInterface interface { + // StopContainer relays a StopContainer request to the plugin. + // The plugin can request updates to unstopped containers in response. + StopContainer(context.Context, *api.PodSandbox, *api.Container) ([]*api.ContainerUpdate, error) +} + +// RemoveContainerInterface handles RemoveContainer API events. 
+type RemoveContainerInterface interface { + // RemoveContainer relays a RemoveContainer event to the plugin. + RemoveContainer(context.Context, *api.PodSandbox, *api.Container) error +} + +// PostCreateContainerInterface handles PostCreateContainer API events. +type PostCreateContainerInterface interface { + // PostCreateContainer relays a PostCreateContainer event to the plugin. + PostCreateContainer(context.Context, *api.PodSandbox, *api.Container) error +} + +// PostStartContainerInterface handles PostStartContainer API events. +type PostStartContainerInterface interface { + // PostStartContainer relays a PostStartContainer event to the plugin. + PostStartContainer(context.Context, *api.PodSandbox, *api.Container) error +} + +// PostUpdateContainerInterface handles PostUpdateContainer API events. +type PostUpdateContainerInterface interface { + // PostUpdateContainer relays a PostUpdateContainer event to the plugin. + PostUpdateContainer(context.Context, *api.PodSandbox, *api.Container) error +} + +// Stub is the interface the stub provides for the plugin implementation. +type Stub interface { + // Run starts the plugin then waits for the plugin service to exit, either due to a + // critical error or an explicit call to Stop(). Once Run() returns, the plugin can be + // restarted by calling Run() or Start() again. + Run(context.Context) error + // Start the plugin. + Start(context.Context) error + // Stop the plugin. + Stop() + // Wait for the plugin to stop. + Wait() + + // UpdateContainer requests unsolicited updates to containers. + UpdateContainers([]*api.ContainerUpdate) ([]*api.ContainerUpdate, error) + + // RegistrationTimeout returns the registration timeout for the stub. + // This is the default timeout if the plugin has not been started or + // the timeout received in the Configure request otherwise. + RegistrationTimeout() time.Duration + + // RequestTimeout returns the request timeout for the stub. 
+ // This is the default timeout if the plugin has not been started or + // the timeout received in the Configure request otherwise. + RequestTimeout() time.Duration +} + +const ( + // DefaultRegistrationTimeout is the default plugin registration timeout. + DefaultRegistrationTimeout = api.DefaultPluginRegistrationTimeout + // DefaultRequestTimeout is the default plugin request processing timeout. + DefaultRequestTimeout = api.DefaultPluginRequestTimeout +) + +var ( + // Logger for messages generated internally by the stub itself. + log = nrilog.Get() + + // Used instead of a nil Context in logging. + noCtx = context.TODO() + + // ErrNoService indicates that the stub has no runtime service/connection, + // for instance by UpdateContainers on a stub which has not been started. + ErrNoService = errors.New("stub: no service/connection") +) + +// EventMask holds a mask of events for plugin subscription. +type EventMask = api.EventMask + +// Option to apply to a plugin during its creation. +type Option func(*stub) error + +// WithOnClose sets a notification function to call if the ttRPC connection goes down. +func WithOnClose(onClose func()) Option { + return func(s *stub) error { + s.onClose = onClose + return nil + } +} + +// WithPluginName sets the name to use in plugin registration. +func WithPluginName(name string) Option { + return func(s *stub) error { + if s.name != "" { + return fmt.Errorf("plugin name already set (%q)", s.name) + } + s.name = name + return nil + } +} + +// WithPluginIdx sets the index to use in plugin registration. +func WithPluginIdx(idx string) Option { + return func(s *stub) error { + if s.idx != "" { + return fmt.Errorf("plugin ID already set (%q)", s.idx) + } + s.idx = idx + return nil + } +} + +// WithSocketPath sets the NRI socket path to connect to. +func WithSocketPath(path string) Option { + return func(s *stub) error { + s.socketPath = path + return nil + } +} + +// WithConnection sets an existing NRI connection to use. 
+func WithConnection(conn stdnet.Conn) Option { + return func(s *stub) error { + s.conn = conn + return nil + } +} + +// WithDialer sets the dialer to use. +func WithDialer(d func(string) (stdnet.Conn, error)) Option { + return func(s *stub) error { + s.dialer = d + return nil + } +} + +// WithTTRPCOptions sets extra client and server options to use for ttrpc . +func WithTTRPCOptions(clientOpts []ttrpc.ClientOpts, serverOpts []ttrpc.ServerOpt) Option { + return func(s *stub) error { + s.clientOpts = append(s.clientOpts, clientOpts...) + s.serverOpts = append(s.serverOpts, serverOpts...) + return nil + } +} + +// stub implements Stub. +type stub struct { + sync.Mutex + plugin interface{} + handlers handlers + events api.EventMask + name string + idx string + socketPath string + dialer func(string) (stdnet.Conn, error) + conn stdnet.Conn + onClose func() + serverOpts []ttrpc.ServerOpt + clientOpts []ttrpc.ClientOpts + rpcm multiplex.Mux + rpcl stdnet.Listener + rpcs *ttrpc.Server + rpcc *ttrpc.Client + runtime api.RuntimeService + started bool + doneC chan struct{} + srvErrC chan error + cfgErrC chan error + syncReq *api.SynchronizeRequest + + registrationTimeout time.Duration + requestTimeout time.Duration +} + +// Handlers for NRI plugin event and request. 
+type handlers struct { + Configure func(context.Context, string, string, string) (api.EventMask, error) + Synchronize func(context.Context, []*api.PodSandbox, []*api.Container) ([]*api.ContainerUpdate, error) + Shutdown func(context.Context) + RunPodSandbox func(context.Context, *api.PodSandbox) error + StopPodSandbox func(context.Context, *api.PodSandbox) error + RemovePodSandbox func(context.Context, *api.PodSandbox) error + CreateContainer func(context.Context, *api.PodSandbox, *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) + StartContainer func(context.Context, *api.PodSandbox, *api.Container) error + UpdateContainer func(context.Context, *api.PodSandbox, *api.Container, *api.LinuxResources) ([]*api.ContainerUpdate, error) + StopContainer func(context.Context, *api.PodSandbox, *api.Container) ([]*api.ContainerUpdate, error) + RemoveContainer func(context.Context, *api.PodSandbox, *api.Container) error + PostCreateContainer func(context.Context, *api.PodSandbox, *api.Container) error + PostStartContainer func(context.Context, *api.PodSandbox, *api.Container) error + PostUpdateContainer func(context.Context, *api.PodSandbox, *api.Container) error +} + +// New creates a stub with the given plugin and options. 
+func New(p interface{}, opts ...Option) (Stub, error) { + stub := &stub{ + plugin: p, + name: os.Getenv(api.PluginNameEnvVar), + idx: os.Getenv(api.PluginIdxEnvVar), + socketPath: api.DefaultSocketPath, + dialer: func(p string) (stdnet.Conn, error) { return stdnet.Dial("unix", p) }, + + registrationTimeout: DefaultRegistrationTimeout, + requestTimeout: DefaultRequestTimeout, + } + + for _, o := range opts { + if err := o(stub); err != nil { + return nil, err + } + } + + if err := stub.setupHandlers(); err != nil { + return nil, err + } + + if err := stub.ensureIdentity(); err != nil { + return nil, err + } + + log.Infof(noCtx, "Created plugin %s (%s, handles %s)", stub.Name(), + filepath.Base(os.Args[0]), stub.events.PrettyString()) + + return stub, nil +} + +// Start event processing, register to NRI and wait for getting configured. +func (stub *stub) Start(ctx context.Context) (retErr error) { + stub.Lock() + defer stub.Unlock() + + if stub.isStarted() { + return fmt.Errorf("stub already started") + } + stub.doneC = make(chan struct{}) + + err := stub.connect() + if err != nil { + return err + } + + rpcm := multiplex.Multiplex(stub.conn) + defer func() { + if retErr != nil { + rpcm.Close() + stub.rpcm = nil + } + }() + + rpcl, err := rpcm.Listen(multiplex.PluginServiceConn) + if err != nil { + return err + } + defer func() { + if retErr != nil { + rpcl.Close() + stub.rpcl = nil + } + }() + + rpcs, err := ttrpc.NewServer(stub.serverOpts...) 
+ if err != nil { + return fmt.Errorf("failed to create ttrpc server: %w", err) + } + defer func() { + if retErr != nil { + rpcs.Close() + stub.rpcs = nil + } + }() + + api.RegisterPluginService(rpcs, stub) + + conn, err := rpcm.Open(multiplex.RuntimeServiceConn) + if err != nil { + return fmt.Errorf("failed to multiplex ttrpc client connection: %w", err) + } + + clientOpts := []ttrpc.ClientOpts{ + ttrpc.WithOnClose(func() { + stub.connClosed() + }), + } + rpcc := ttrpc.NewClient(conn, append(clientOpts, stub.clientOpts...)...) + defer func() { + if retErr != nil { + rpcc.Close() + stub.rpcc = nil + } + }() + + stub.srvErrC = make(chan error, 1) + stub.cfgErrC = make(chan error, 1) + + go func(l stdnet.Listener, doneC chan struct{}, srvErrC chan error) { + srvErrC <- rpcs.Serve(ctx, l) + close(doneC) + }(rpcl, stub.doneC, stub.srvErrC) + + stub.rpcm = rpcm + stub.rpcl = rpcl + stub.rpcs = rpcs + stub.rpcc = rpcc + + stub.runtime = api.NewRuntimeClient(rpcc) + + if err = stub.register(ctx); err != nil { + stub.close() + return err + } + + if err = <-stub.cfgErrC; err != nil { + return err + } + + log.Infof(ctx, "Started plugin %s...", stub.Name()) + + stub.started = true + return nil +} + +// Stop the plugin. +func (stub *stub) Stop() { + log.Infof(noCtx, "Stopping plugin %s...", stub.Name()) + + stub.Lock() + defer stub.Unlock() + stub.close() +} + +// IsStarted returns true if the plugin has been started either by Start() or by Run(). +func (stub *stub) IsStarted() bool { + stub.Lock() + defer stub.Unlock() + return stub.isStarted() +} + +func (stub *stub) isStarted() bool { + return stub.started +} + +// reset stub to the status that can initiate a new +// NRI connection, the caller must hold lock. 
func (stub *stub) close() {
	// Nothing to tear down unless Start()/Run() completed successfully.
	if !stub.isStarted() {
		return
	}

	if stub.rpcl != nil {
		stub.rpcl.Close()
	}
	if stub.rpcs != nil {
		stub.rpcs.Close()
	}
	if stub.rpcc != nil {
		stub.rpcc.Close()
	}
	if stub.rpcm != nil {
		stub.rpcm.Close()
	}
	// If the serve goroutine was started, wait for it to exit; it closes
	// doneC after publishing the serve result on srvErrC.
	if stub.srvErrC != nil {
		<-stub.doneC
	}

	stub.started = false
	stub.conn = nil
	stub.syncReq = nil
}

// Run the plugin. Start event processing then wait for an error or getting stopped.
func (stub *stub) Run(ctx context.Context) error {
	var err error

	if err = stub.Start(ctx); err != nil {
		return err
	}

	// Block until the ttrpc server exits; a plain server shutdown
	// (ttrpc.ErrServerClosed) is a normal stop, not an error.
	err = <-stub.srvErrC
	if err == ttrpc.ErrServerClosed {
		return nil
	}

	return err
}

// Wait for the plugin to stop, should be called after Start() or Run().
func (stub *stub) Wait() {
	if stub.IsStarted() {
		<-stub.doneC
	}
}

// Name returns the full indexed name of the plugin.
func (stub *stub) Name() string {
	return stub.idx + "-" + stub.name
}

// RegistrationTimeout returns the timeout used when registering with NRI.
func (stub *stub) RegistrationTimeout() time.Duration {
	return stub.registrationTimeout
}

// RequestTimeout returns the timeout used for NRI requests.
func (stub *stub) RequestTimeout() time.Duration {
	return stub.requestTimeout
}

// Connect the plugin to NRI.
func (stub *stub) connect() error {
	// A pre-set connection (e.g. supplied via an option) takes precedence.
	if stub.conn != nil {
		log.Infof(noCtx, "Using given plugin connection...")
		return nil
	}

	// A pre-connected socket can also be handed over as a file descriptor
	// number advertised in the environment.
	if env := os.Getenv(api.PluginSocketEnvVar); env != "" {
		log.Infof(noCtx, "Using connection %q from environment...", env)

		fd, err := strconv.Atoi(env)
		if err != nil {
			return fmt.Errorf("invalid socket in environment (%s=%q): %w",
				api.PluginSocketEnvVar, env, err)
		}

		stub.conn, err = net.NewFdConn(fd)
		if err != nil {
			return fmt.Errorf("invalid socket (%d) in environment: %w", fd, err)
		}

		return nil
	}

	// Otherwise dial the NRI socket ourselves.
	conn, err := stub.dialer(stub.socketPath)
	if err != nil {
		return fmt.Errorf("failed to connect to NRI service: %w", err)
	}

	stub.conn = conn

	return nil
}

// Register the plugin with NRI.
func (stub *stub) register(ctx context.Context) error {
	log.Infof(ctx, "Registering plugin %s...", stub.Name())

	// Bound the registration round-trip by the configured timeout.
	ctx, cancel := context.WithTimeout(ctx, stub.registrationTimeout)
	defer cancel()

	req := &api.RegisterPluginRequest{
		PluginName: stub.name,
		PluginIdx:  stub.idx,
	}
	if _, err := stub.runtime.RegisterPlugin(ctx, req); err != nil {
		return fmt.Errorf("failed to register with NRI/Runtime: %w", err)
	}

	return nil
}

// Handle a lost connection.
func (stub *stub) connClosed() {
	stub.Lock()
	stub.close()
	stub.Unlock()
	// Without an onClose callback the stub cannot usefully outlive its
	// runtime connection, so exit the process.
	if stub.onClose != nil {
		stub.onClose()
		return
	}

	os.Exit(0)
}

//
// plugin event and request handlers
//

// UpdateContainers requests unsolicited updates to containers.
func (stub *stub) UpdateContainers(update []*api.ContainerUpdate) ([]*api.ContainerUpdate, error) {
	if stub.runtime == nil {
		return nil, ErrNoService
	}

	ctx := context.Background()
	req := &api.UpdateContainersRequest{
		Update: update,
	}
	// On partial failure the runtime reports the updates it rejected.
	rpl, err := stub.runtime.UpdateContainers(ctx, req)
	if rpl != nil {
		return rpl.Failed, err
	}
	return nil, err
}

// Configure the plugin.
func (stub *stub) Configure(ctx context.Context, req *api.ConfigureRequest) (rpl *api.ConfigureResponse, retErr error) {
	var (
		events api.EventMask
		err    error
	)

	log.Infof(ctx, "Configuring plugin %s for runtime %s/%s...", stub.Name(),
		req.RuntimeName, req.RuntimeVersion)

	// Adopt the runtime-dictated timeouts (wire values are in milliseconds).
	stub.registrationTimeout = time.Duration(req.RegistrationTimeout * int64(time.Millisecond))
	stub.requestTimeout = time.Duration(req.RequestTimeout * int64(time.Millisecond))

	// Always report the configuration outcome — success or failure — to the
	// Start() goroutine blocked reading cfgErrC.
	defer func() {
		stub.cfgErrC <- retErr
	}()

	if handler := stub.handlers.Configure; handler == nil {
		// No Configure handler: subscribe to every event the plugin handles.
		events = stub.events
	} else {
		events, err = handler(ctx, req.Config, req.RuntimeName, req.RuntimeVersion)
		if err != nil {
			log.Errorf(ctx, "Plugin configuration failed: %v", err)
			return nil, err
		}

		// An empty mask from the handler means "all handled events".
		if events == 0 {
			events = stub.events
		}

		// Only allow plugins to subscribe to events they can handle.
		if extra := events & ^stub.events; extra != 0 {
			log.Errorf(ctx, "Plugin subscribed for unhandled events %s (0x%x)",
				extra.PrettyString(), extra)
			return nil, fmt.Errorf("internal error: unhandled events %s (0x%x)",
				extra.PrettyString(), extra)
		}

		log.Infof(ctx, "Subscribing plugin %s (%s) for events %s", stub.Name(),
			filepath.Base(os.Args[0]), events.PrettyString())
	}

	return &api.ConfigureResponse{
		Events: int32(events),
	}, nil
}

// Synchronize the state of the plugin with the runtime.
func (stub *stub) Synchronize(ctx context.Context, req *api.SynchronizeRequest) (*api.SynchronizeResponse, error) {
	handler := stub.handlers.Synchronize
	if handler == nil {
		return &api.SynchronizeResponse{More: req.More}, nil
	}

	// With req.More set the runtime is streaming the state in pieces;
	// buffer this piece and wait for the final one.
	if req.More {
		return stub.collectSync(req)
	}

	return stub.deliverSync(ctx, req)
}

// collectSync buffers one piece of a multi-part synchronization request.
func (stub *stub) collectSync(req *api.SynchronizeRequest) (*api.SynchronizeResponse, error) {
	stub.Lock()
	defer stub.Unlock()

	log.Debugf(noCtx, "collecting sync req with %d pods, %d containers...",
		len(req.Pods), len(req.Containers))

	if stub.syncReq == nil {
		stub.syncReq = req
	} else {
		stub.syncReq.Pods = append(stub.syncReq.Pods, req.Pods...)
		stub.syncReq.Containers = append(stub.syncReq.Containers, req.Containers...)
	}

	return &api.SynchronizeResponse{More: req.More}, nil
}

// deliverSync merges the final piece into any buffered state and invokes the
// plugin's Synchronize handler with the combined pod/container set.
func (stub *stub) deliverSync(ctx context.Context, req *api.SynchronizeRequest) (*api.SynchronizeResponse, error) {
	// Detach the collected state under the lock, then merge outside it.
	stub.Lock()
	syncReq := stub.syncReq
	stub.syncReq = nil
	stub.Unlock()

	if syncReq == nil {
		syncReq = req
	} else {
		syncReq.Pods = append(syncReq.Pods, req.Pods...)
		syncReq.Containers = append(syncReq.Containers, req.Containers...)
	}

	update, err := stub.handlers.Synchronize(ctx, syncReq.Pods, syncReq.Containers)
	return &api.SynchronizeResponse{
		Update: update,
		More:   false,
	}, err
}

// Shutdown the plugin.
func (stub *stub) Shutdown(ctx context.Context, _ *api.ShutdownRequest) (*api.ShutdownResponse, error) {
	handler := stub.handlers.Shutdown
	if handler != nil {
		handler(ctx)
	}
	return &api.ShutdownResponse{}, nil
}

// CreateContainer request handler.
+func (stub *stub) CreateContainer(ctx context.Context, req *api.CreateContainerRequest) (*api.CreateContainerResponse, error) { + handler := stub.handlers.CreateContainer + if handler == nil { + return nil, nil + } + adjust, update, err := handler(ctx, req.Pod, req.Container) + return &api.CreateContainerResponse{ + Adjust: adjust, + Update: update, + }, err +} + +// UpdateContainer request handler. +func (stub *stub) UpdateContainer(ctx context.Context, req *api.UpdateContainerRequest) (*api.UpdateContainerResponse, error) { + handler := stub.handlers.UpdateContainer + if handler == nil { + return nil, nil + } + update, err := handler(ctx, req.Pod, req.Container, req.LinuxResources) + return &api.UpdateContainerResponse{ + Update: update, + }, err +} + +// StopContainer request handler. +func (stub *stub) StopContainer(ctx context.Context, req *api.StopContainerRequest) (*api.StopContainerResponse, error) { + handler := stub.handlers.StopContainer + if handler == nil { + return nil, nil + } + update, err := handler(ctx, req.Pod, req.Container) + return &api.StopContainerResponse{ + Update: update, + }, err +} + +// StateChange event handler. 
+func (stub *stub) StateChange(ctx context.Context, evt *api.StateChangeEvent) (*api.Empty, error) { + var err error + switch evt.Event { + case api.Event_RUN_POD_SANDBOX: + if handler := stub.handlers.RunPodSandbox; handler != nil { + err = handler(ctx, evt.Pod) + } + case api.Event_STOP_POD_SANDBOX: + if handler := stub.handlers.StopPodSandbox; handler != nil { + err = handler(ctx, evt.Pod) + } + case api.Event_REMOVE_POD_SANDBOX: + if handler := stub.handlers.RemovePodSandbox; handler != nil { + err = handler(ctx, evt.Pod) + } + case api.Event_POST_CREATE_CONTAINER: + if handler := stub.handlers.PostCreateContainer; handler != nil { + err = handler(ctx, evt.Pod, evt.Container) + } + case api.Event_START_CONTAINER: + if handler := stub.handlers.StartContainer; handler != nil { + err = handler(ctx, evt.Pod, evt.Container) + } + case api.Event_POST_START_CONTAINER: + if handler := stub.handlers.PostStartContainer; handler != nil { + err = handler(ctx, evt.Pod, evt.Container) + } + case api.Event_POST_UPDATE_CONTAINER: + if handler := stub.handlers.PostUpdateContainer; handler != nil { + err = handler(ctx, evt.Pod, evt.Container) + } + case api.Event_REMOVE_CONTAINER: + if handler := stub.handlers.RemoveContainer; handler != nil { + err = handler(ctx, evt.Pod, evt.Container) + } + } + + return &api.StateChangeResponse{}, err +} + +// ensureIdentity sets plugin index and name from the binary if those are unset. +func (stub *stub) ensureIdentity() error { + if stub.idx != "" && stub.name != "" { + return nil + } + + if stub.idx != "" { + stub.name = filepath.Base(os.Args[0]) + return nil + } + + idx, name, err := api.ParsePluginName(filepath.Base(os.Args[0])) + if err != nil { + return err + } + + stub.name = name + stub.idx = idx + + return nil +} + +// Set up event handlers and the subscription mask for the plugin. 
+func (stub *stub) setupHandlers() error { + if plugin, ok := stub.plugin.(ConfigureInterface); ok { + stub.handlers.Configure = plugin.Configure + } + if plugin, ok := stub.plugin.(SynchronizeInterface); ok { + stub.handlers.Synchronize = plugin.Synchronize + } + if plugin, ok := stub.plugin.(ShutdownInterface); ok { + stub.handlers.Shutdown = plugin.Shutdown + } + + if plugin, ok := stub.plugin.(RunPodInterface); ok { + stub.handlers.RunPodSandbox = plugin.RunPodSandbox + stub.events.Set(api.Event_RUN_POD_SANDBOX) + } + if plugin, ok := stub.plugin.(StopPodInterface); ok { + stub.handlers.StopPodSandbox = plugin.StopPodSandbox + stub.events.Set(api.Event_STOP_POD_SANDBOX) + } + if plugin, ok := stub.plugin.(RemovePodInterface); ok { + stub.handlers.RemovePodSandbox = plugin.RemovePodSandbox + stub.events.Set(api.Event_REMOVE_POD_SANDBOX) + } + if plugin, ok := stub.plugin.(CreateContainerInterface); ok { + stub.handlers.CreateContainer = plugin.CreateContainer + stub.events.Set(api.Event_CREATE_CONTAINER) + } + if plugin, ok := stub.plugin.(StartContainerInterface); ok { + stub.handlers.StartContainer = plugin.StartContainer + stub.events.Set(api.Event_START_CONTAINER) + } + if plugin, ok := stub.plugin.(UpdateContainerInterface); ok { + stub.handlers.UpdateContainer = plugin.UpdateContainer + stub.events.Set(api.Event_UPDATE_CONTAINER) + } + if plugin, ok := stub.plugin.(StopContainerInterface); ok { + stub.handlers.StopContainer = plugin.StopContainer + stub.events.Set(api.Event_STOP_CONTAINER) + } + if plugin, ok := stub.plugin.(RemoveContainerInterface); ok { + stub.handlers.RemoveContainer = plugin.RemoveContainer + stub.events.Set(api.Event_REMOVE_CONTAINER) + } + if plugin, ok := stub.plugin.(PostCreateContainerInterface); ok { + stub.handlers.PostCreateContainer = plugin.PostCreateContainer + stub.events.Set(api.Event_POST_CREATE_CONTAINER) + } + if plugin, ok := stub.plugin.(PostStartContainerInterface); ok { + stub.handlers.PostStartContainer = 
plugin.PostStartContainer + stub.events.Set(api.Event_POST_START_CONTAINER) + } + if plugin, ok := stub.plugin.(PostUpdateContainerInterface); ok { + stub.handlers.PostUpdateContainer = plugin.PostUpdateContainer + stub.events.Set(api.Event_POST_UPDATE_CONTAINER) + } + + if stub.events == 0 { + return fmt.Errorf("internal error: plugin %T does not implement any NRI request handlers", + stub.plugin) + } + + return nil +} diff --git a/vendor/github.com/containerd/ttrpc/.gitattributes b/vendor/github.com/containerd/ttrpc/.gitattributes new file mode 100644 index 0000000000..d207b1802b --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/containerd/ttrpc/.gitignore b/vendor/github.com/containerd/ttrpc/.gitignore new file mode 100644 index 0000000000..88ceb2764b --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/.gitignore @@ -0,0 +1,13 @@ +# Binaries for programs and plugins +/bin/ +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +coverage.txt diff --git a/vendor/github.com/containerd/ttrpc/.golangci.yml b/vendor/github.com/containerd/ttrpc/.golangci.yml new file mode 100644 index 0000000000..6462e52f66 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/.golangci.yml @@ -0,0 +1,52 @@ +linters: + enable: + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +linters-settings: + revive: + ignore-generated-headers: true + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + arguments: [["UID", "GID"], []] + - name: var-declaration + - name: package-comments + - name: range + - 
name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + +issues: + include: + - EXC0002 + +run: + timeout: 8m + skip-dirs: + - example diff --git a/vendor/github.com/containerd/ttrpc/LICENSE b/vendor/github.com/containerd/ttrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/ttrpc/Makefile b/vendor/github.com/containerd/ttrpc/Makefile new file mode 100644 index 0000000000..c3a497dcac --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/Makefile @@ -0,0 +1,180 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Go command to use for build +GO ?= go +INSTALL ?= install + +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +WHALE = "🇩" +ONI = "👹" + +# Project binaries. +COMMANDS=protoc-gen-go-ttrpc protoc-gen-gogottrpc + +ifdef BUILDTAGS + GO_BUILDTAGS = ${BUILDTAGS} +endif +GO_BUILDTAGS ?= +GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) + +# Project packages. +PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /example) +TESTPACKAGES=$(shell $(GO) list ${GO_TAGS} ./... 
| grep -v /cmd | grep -v /integration | grep -v /example) +BINPACKAGES=$(addprefix ./cmd/,$(COMMANDS)) + +#Replaces ":" (*nix), ";" (windows) with newline for easy parsing +GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n") + +TESTFLAGS_RACE= +GO_BUILD_FLAGS= +# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809 +GO_GCFLAGS=$(shell \ + set -- ${GOPATHS}; \ + echo "-gcflags=-trimpath=$${1}/src"; \ + ) + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= $(TESTFLAGS_RACE) $(EXTRA_TESTFLAGS) +TESTFLAGS_PARALLEL ?= 8 + +# Use this to replace `go test` with, for instance, `gotestsum` +GOTEST ?= $(GO) test + +.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install vendor install-protobuf install-protobuild +.DEFAULT: default + +# Forcibly set the default goal to all, in case an include above brought in a rule definition. +.DEFAULT_GOAL := all + +all: binaries + +check: proto-fmt ## run all linters + @echo "$(WHALE) $@" + GOGC=75 golangci-lint run + +ci: check binaries check-protos coverage # coverage-integration ## to be used by the CI + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +generate: protos + @echo "$(WHALE) $@" + @PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES} + +protos: bin/protoc-gen-gogottrpc bin/protoc-gen-go-ttrpc ## generate protobuf + @echo "$(WHALE) $@" + @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}) + +check-protos: protos ## check if protobufs needs to be generated again + @echo "$(WHALE) $@" + @test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \ + ((git diff | cat) && \ + (echo "$(ONI) please run 'make protos' when making changes to proto files" && false)) + +check-api-descriptors: protos ## check that protobuf changes aren't present. 
+ @echo "$(WHALE) $@" + @test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \ + ((git diff $$(find . -name '*.pb.txt') | cat) && \ + (echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false)) + +proto-fmt: ## check format of proto files + @echo "$(WHALE) $@" + @test -z "$$(find . -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ + (echo "$(ONI) please indent proto files with tabs only" && false) + @test -z "$$(find . -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ + (echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false) + +build: ## build the go packages + @echo "$(WHALE) $@" + @$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${PACKAGES} + +test: ## run tests, except integration tests and tests that require root + @echo "$(WHALE) $@" + @$(GOTEST) ${TESTFLAGS} ${TESTPACKAGES} + +integration: ## run integration tests + @echo "$(WHALE) $@" + @cd "${ROOTDIR}/integration" && $(GOTEST) -v ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} . + +benchmark: ## run benchmarks tests + @echo "$(WHALE) $@" + @$(GO) test ${TESTFLAGS} -bench . -run Benchmark + +FORCE: + +define BUILD_BINARY +@echo "$(WHALE) $@" +@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_TAGS} ./$< +endef + +# Build a binary from a cmd. 
+bin/%: cmd/% FORCE + $(call BUILD_BINARY) + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) + +install: ## install binaries + @echo "$(WHALE) $@ $(BINPACKAGES)" + @$(GO) install $(BINPACKAGES) + +install-protobuf: + @echo "$(WHALE) $@" + @script/install-protobuf + +install-protobuild: + @echo "$(WHALE) $@" + @$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 + @$(GO) install github.com/containerd/protobuild@14832ccc41429f5c4f81028e5af08aa233a219cf + +coverage: ## generate coverprofiles from the unit tests, except tests that require root + @echo "$(WHALE) $@" + @rm -f coverage.txt + @$(GO) test ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null + @( for pkg in ${PACKAGES}; do \ + $(GO) test ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +vendor: ## ensure all the go.mod/go.sum files are up-to-date + @echo "$(WHALE) $@" + @$(GO) mod tidy + @$(GO) mod verify + +verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date + @echo "$(WHALE) $@" + @$(GO) mod tidy + @$(GO) mod verify + @test -z "$$(git status --short | grep "go.sum" | tee /dev/stderr)" || \ + ((git diff | cat) && \ + (echo "$(ONI) make sure to checkin changes after go mod tidy" && false)) + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort diff --git a/vendor/github.com/containerd/ttrpc/PROTOCOL.md b/vendor/github.com/containerd/ttrpc/PROTOCOL.md new file mode 100644 index 0000000000..12b43f6bd6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/PROTOCOL.md @@ -0,0 +1,240 @@ +# Protocol Specification + +The ttrpc protocol is client/server protocol to support multiple request streams +over a single connection with lightweight framing. 
The client represents the +process which initiated the underlying connection and the server is the process +which accepted the connection. The protocol is currently defined as +asymmetrical, with clients sending requests and servers sending responses. Both +clients and servers are able to send stream data. The roles are also used in +determining the stream identifiers, with client initiated streams using odd +number identifiers and server initiated using even number. The protocol may be +extended in the future to support server initiated streams, that is not +supported in the latest version. + +## Purpose + +The ttrpc protocol is designed to be lightweight and optimized for low latency +and reliable connections between processes on the same host. The protocol does +not include features for handling unreliable connections such as handshakes, +resets, pings, or flow control. The protocol is designed to make low-overhead +implementations as simple as possible. It is not intended as a suitable +replacement for HTTP2/3 over the network. + +## Message Frame + +Each Message Frame consists of a 10-byte message header followed +by message data. The data length and stream ID are both big-endian +4-byte unsigned integers. The message type is an unsigned 1-byte +integer. The flags are also an unsigned 1-byte integer and +use is defined by the message type. + + +---------------------------------------------------------------+ + | Data Length (32) | + +---------------------------------------------------------------+ + | Stream ID (32) | + +---------------+-----------------------------------------------+ + | Msg Type (8) | + +---------------+ + | Flags (8) | + +---------------+-----------------------------------------------+ + | Data (*) | + +---------------------------------------------------------------+ + +The Data Length field represents the number of bytes in the Data field. The +total frame size will always be Data Length + 10 bytes. 
The maximum data length
+is 4MB and any larger size should be rejected. Due to the maximum data size
+being less than 16MB, the first frame byte should always be zero. This first
+byte should be considered reserved for future use.
+
+The Stream ID must be odd for client initiated streams and even for server
+initiated streams. Server initiated streams are not currently supported.
+
+## Message Types
+
+| Message Type | Name | Description |
+|--------------|----------|----------------------------------|
+| 0x01 | Request | Initiates stream |
+| 0x02 | Response | Final stream data and terminates |
+| 0x03 | Data | Stream data |
+
+### Request
+
+The request message is used to initiate a stream and send along request data for
+properly routing and handling the stream. The stream may indicate unary without
+any inbound or outbound stream data where only a response is expected on the
+stream. The request may also indicate the stream is still open for more data and
+no response is expected until data is finished. If the remote indicates the
+stream is closed, the request may be considered non-unary but without any more
+stream data sent. In the case of `remote closed`, the remote still expects to
+receive a response or stream data. For compatibility with non-streaming clients,
+a request with empty flags indicates a unary request.
+
+#### Request Flags
+
+| Flag | Name | Description |
+|------|-----------------|--------------------------------------------------|
+| 0x01 | `remote closed` | Non-unary, but no more data expected from remote |
+| 0x02 | `remote open` | Non-unary, remote is still sending data |
+
+### Response
+
+The response message is used to end a stream with data, an empty response, or
+an error. A response message is the only expected message after a unary request.
+A non-unary request does not require a response message if the server is sending
+back stream data. A non-unary stream may return a single response message but no
+other stream data may follow. 
+ +#### Response Flags + +No response flags are defined at this time, flags should be empty. + +### Data + +The data message is used to send data on an already initialized stream. Either +client or server may send data. A data message is not allowed on a unary stream. +A data message should not be sent after indicating `remote closed` to the peer. +The last data message on a stream must set the `remote closed` flag. + +The `no data` flag is used to indicate that the data message does not include +any data. This is normally used with the `remote closed` flag to indicate the +stream is now closed without transmitting any data. Since ttrpc normally +transmits a single object per message, a zero length data message may be +interpreted as an empty object. For example, transmitting the number zero as a +protobuf message ends up with a data length of zero, but the message is still +considered data and should be processed. + +#### Data Flags + +| Flag | Name | Description | +|------|-----------------|-----------------------------------| +| 0x01 | `remote closed` | No more data expected from remote | +| 0x04 | `no data` | This message does not have data | + +## Streaming + +All ttrpc requests use streams to transfer data. Unary streams will only have +two messages sent per stream, a request from a client and a response from the +server. Non-unary streams, however, may send any numbers of messages from the +client and the server. This makes stream management more complicated than unary +streams since both client and server need to track additional state. To keep +this management as simple as possible, ttrpc minimizes the number of states and +uses two flags instead of control frames. Each stream has two states while a +stream is still alive: `local closed` and `remote closed`. Each peer considers +local and remote from their own perspective and sets flags from the other peer's +perspective. 
For example, if a client sends a data frame with the +`remote closed` flag, that is indicating that the client is now `local closed` +and the server will be `remote closed`. A unary operation does not need to send +these flags since each received message always indicates `remote closed`. Once a +peer is both `local closed` and `remote closed`, the stream is considered +finished and may be cleaned up. + +Due to the asymmetric nature of the current protocol, a client should +always be in the `local closed` state before `remote closed` and a server should +always be in the `remote closed` state before `local closed`. This happens +because the client is always initiating requests and a client always expects a +final response back from a server to indicate the initiated request has been +fulfilled. This may mean server sends a final empty response to finish a stream +even after it has already completed sending data before the client. + +### Unary State Diagram + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +---------+ | + local >---------------+ Request +--------------------> remote + closed | +---------+ | closed + | | + | +----------+ | + finished <--------------+ Response +--------------------< finished + | +----------+ | + | | + +### Non-Unary State Diagrams + +RC: `remote closed` flag +RO: `remote open` flag + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + >-------------+ Request [RO] +-----------------> + | +--------------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +-----------+ | + local >---------------+ Data [RC] +------------------> remote + closed | +-----------+ | closed + | | + | +----------+ | + finished <--------------+ Response +--------------------< finished + | +----------+ | + | | + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + local >-------------+ Request [RC] 
+-----------------> remote + closed | +--------------+ | closed + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +-----------+ | + finished <---------------+ Data [RC] +------------------< finished + | +-----------+ | + | | + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + >-------------+ Request [RO] +-----------------> + | +--------------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +-----------+ | + local >---------------+ Data [RC] +------------------> remote + closed | +-----------+ | closed + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +-----------+ | + finished <---------------+ Data [RC] +------------------< finished + | +-----------+ | + | | + +## RPC + +While this protocol is defined primarily to support Remote Procedure Calls, the +protocol does not define the request and response types beyond the messages +defined in the protocol. The implementation provides a default protobuf +definition of request and response which may be used for cross language rpc. +All implementations should at least define a request type which support +routing by procedure name and a response type which supports call status. + +## Version History + +| Version | Features | +|---------|---------------------| +| 1.0 | Unary requests only | +| 1.2 | Streaming support | diff --git a/vendor/github.com/containerd/ttrpc/Protobuild.toml b/vendor/github.com/containerd/ttrpc/Protobuild.toml new file mode 100644 index 0000000000..0f6ccbd1e8 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/Protobuild.toml @@ -0,0 +1,28 @@ +version = "2" +generators = ["go"] + +# Control protoc include paths. 
Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["."] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "proto/status.proto" = "google.golang.org/genproto/googleapis/rpc/status" + +[[overrides]] +# enable ttrpc and disable fieldpath and grpc for the shim +prefixes = ["github.com/containerd/ttrpc/integration/streaming"] +generators = ["go", "go-ttrpc"] + +[overrides.parameters.go-ttrpc] +prefix = "TTRPC" diff --git a/vendor/github.com/containerd/ttrpc/README.md b/vendor/github.com/containerd/ttrpc/README.md new file mode 100644 index 0000000000..ce95f63bee --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/README.md @@ -0,0 +1,59 @@ +# ttrpc + +[![Build Status](https://github.com/containerd/ttrpc/actions/workflows/ci.yml/badge.svg)](https://github.com/containerd/ttrpc/actions/workflows/ci.yml) + +GRPC for low-memory environments. + +The existing grpc-go project requires a lot of memory overhead for importing +packages and at runtime. While this is great for many services with low density +requirements, this can be a problem when running a large number of services on +a single machine or on a machine with a small amount of memory. + +Using the same GRPC definitions, this project reduces the binary size and +protocol overhead required. 
We do this by eliding the `net/http`, `net/http2`
+and `grpc` packages used by grpc, replacing them with a lightweight framing
+protocol. The result is smaller binaries that use less resident memory with
+the same ease of use as GRPC.
+
+Please note that while this project supports generating either end of the
+protocol, the generated service definitions will be incompatible with regular
+GRPC services, as they do not speak the same protocol.
+
+# Protocol
+
+See the [protocol specification](./PROTOCOL.md).
+
+# Usage
+
+Create a gogo vanity binary (see
+[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
+example with the ttrpc plugin enabled).
+
+It's recommended to use [`protobuild`](https://github.com/containerd/protobuild)
+to build the protobufs for this project, but this will work with protoc
+directly, if required.
+
+# Differences from GRPC
+
+- The protocol stack has been replaced with a lighter protocol that doesn't
+  require http, http2 and tls.
+- The client and server interface are identical whereas in GRPC there is a
+  client and server interface that are different.
+- The Go stdlib context package is used instead.
+
+# Status
+
+TODO:
+
+- [ ] Add testing under concurrent load to ensure correct behavior
+- [ ] Verify connection error handling
+
+# Project details
+
+ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository. 
diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go new file mode 100644 index 0000000000..872261e6de --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/channel.go @@ -0,0 +1,182 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + messageHeaderLength = 10 + messageLengthMax = 4 << 20 +) + +type messageType uint8 + +const ( + messageTypeRequest messageType = 0x1 + messageTypeResponse messageType = 0x2 + messageTypeData messageType = 0x3 +) + +func (mt messageType) String() string { + switch mt { + case messageTypeRequest: + return "request" + case messageTypeResponse: + return "response" + case messageTypeData: + return "data" + default: + return "unknown" + } +} + +const ( + flagRemoteClosed uint8 = 0x1 + flagRemoteOpen uint8 = 0x2 + flagNoData uint8 = 0x4 +) + +// messageHeader represents the fixed-length message header of 10 bytes sent +// with every request. +type messageHeader struct { + Length uint32 // length excluding this header. b[:4] + StreamID uint32 // identifies which request stream message is a part of. 
b[4:8] + Type messageType // message type b[8] + Flags uint8 // type specific flags b[9] +} + +func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) { + _, err := io.ReadFull(r, p[:messageHeaderLength]) + if err != nil { + return messageHeader{}, err + } + + return messageHeader{ + Length: binary.BigEndian.Uint32(p[:4]), + StreamID: binary.BigEndian.Uint32(p[4:8]), + Type: messageType(p[8]), + Flags: p[9], + }, nil +} + +func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error { + binary.BigEndian.PutUint32(p[:4], mh.Length) + binary.BigEndian.PutUint32(p[4:8], mh.StreamID) + p[8] = byte(mh.Type) + p[9] = mh.Flags + + _, err := w.Write(p[:]) + return err +} + +var buffers sync.Pool + +type channel struct { + conn net.Conn + bw *bufio.Writer + br *bufio.Reader + hrbuf [messageHeaderLength]byte // avoid alloc when reading header + hwbuf [messageHeaderLength]byte +} + +func newChannel(conn net.Conn) *channel { + return &channel{ + conn: conn, + bw: bufio.NewWriter(conn), + br: bufio.NewReader(conn), + } +} + +// recv a message from the channel. The returned buffer contains the message. +// +// If a valid grpc status is returned, the message header +// returned will be valid and caller should send that along to +// the correct consumer. The bytes on the underlying channel +// will be discarded. 
+func (ch *channel) recv() (messageHeader, []byte, error) { + mh, err := readMessageHeader(ch.hrbuf[:], ch.br) + if err != nil { + return messageHeader{}, nil, err + } + + if mh.Length > uint32(messageLengthMax) { + if _, err := ch.br.Discard(int(mh.Length)); err != nil { + return mh, nil, fmt.Errorf("failed to discard after receiving oversized message: %w", err) + } + + return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) + } + + var p []byte + if mh.Length > 0 { + p = ch.getmbuf(int(mh.Length)) + if _, err := io.ReadFull(ch.br, p); err != nil { + return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) + } + } + + return mh, p, nil +} + +func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error { + if len(p) > messageLengthMax { + return OversizedMessageError(len(p)) + } + + if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil { + return err + } + + if len(p) > 0 { + _, err := ch.bw.Write(p) + if err != nil { + return err + } + } + + return ch.bw.Flush() +} + +func (ch *channel) getmbuf(size int) []byte { + // we can't use the standard New method on pool because we want to allocate + // based on size. + b, ok := buffers.Get().(*[]byte) + if !ok || cap(*b) < size { + // TODO(stevvooe): It may be better to allocate these in fixed length + // buckets to reduce fragmentation but its not clear that would help + // with performance. An ilogb approach or similar would work well. 
+ bb := make([]byte, size) + b = &bb + } else { + *b = (*b)[:size] + } + return *b +} + +func (ch *channel) putmbuf(p []byte) { + buffers.Put(&p) +} diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go new file mode 100644 index 0000000000..b1bc7a3fc4 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -0,0 +1,570 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/log" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Client for a ttrpc server +type Client struct { + codec codec + conn net.Conn + channel *channel + + streamLock sync.RWMutex + streams map[streamID]*stream + nextStreamID streamID + sendLock sync.Mutex + + ctx context.Context + closed func() + + closeOnce sync.Once + userCloseFunc func() + userCloseWaitCh chan struct{} + + interceptor UnaryClientInterceptor +} + +// ClientOpts configures a client +type ClientOpts func(c *Client) + +// WithOnClose sets the close func whenever the client's Close() method is called +func WithOnClose(onClose func()) ClientOpts { + return func(c *Client) { + c.userCloseFunc = onClose + } +} + +// WithUnaryClientInterceptor sets the provided client interceptor +func WithUnaryClientInterceptor(i UnaryClientInterceptor) 
ClientOpts { + return func(c *Client) { + c.interceptor = i + } +} + +// WithChainUnaryClientInterceptor sets the provided chain of client interceptors +func WithChainUnaryClientInterceptor(interceptors ...UnaryClientInterceptor) ClientOpts { + return func(c *Client) { + if len(interceptors) == 0 { + return + } + if c.interceptor != nil { + interceptors = append([]UnaryClientInterceptor{c.interceptor}, interceptors...) + } + c.interceptor = func( + ctx context.Context, + req *Request, + reply *Response, + info *UnaryClientInfo, + final Invoker, + ) error { + return interceptors[0](ctx, req, reply, info, + chainUnaryInterceptors(interceptors[1:], final, info)) + } + } +} + +func chainUnaryInterceptors(interceptors []UnaryClientInterceptor, final Invoker, info *UnaryClientInfo) Invoker { + if len(interceptors) == 0 { + return final + } + return func( + ctx context.Context, + req *Request, + reply *Response, + ) error { + return interceptors[0](ctx, req, reply, info, + chainUnaryInterceptors(interceptors[1:], final, info)) + } +} + +// NewClient creates a new ttrpc client using the given connection +func NewClient(conn net.Conn, opts ...ClientOpts) *Client { + ctx, cancel := context.WithCancel(context.Background()) + channel := newChannel(conn) + c := &Client{ + codec: codec{}, + conn: conn, + channel: channel, + streams: make(map[streamID]*stream), + nextStreamID: 1, + closed: cancel, + ctx: ctx, + userCloseFunc: func() {}, + userCloseWaitCh: make(chan struct{}), + } + + for _, o := range opts { + o(c) + } + + if c.interceptor == nil { + c.interceptor = defaultClientInterceptor + } + + go c.run() + return c +} + +func (c *Client) send(sid uint32, mt messageType, flags uint8, b []byte) error { + c.sendLock.Lock() + defer c.sendLock.Unlock() + return c.channel.send(sid, mt, flags, b) +} + +// Call makes a unary request and returns with response +func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { + payload, err := 
c.codec.Marshal(req) + if err != nil { + return err + } + + var ( + creq = &Request{ + Service: service, + Method: method, + Payload: payload, + // TODO: metadata from context + } + + cresp = &Response{} + ) + + if metadata, ok := GetMetadata(ctx); ok { + metadata.setRequest(creq) + } + + if dl, ok := ctx.Deadline(); ok { + creq.TimeoutNano = time.Until(dl).Nanoseconds() + } + + info := &UnaryClientInfo{ + FullMethod: fullPath(service, method), + } + if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { + return err + } + + if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { + return err + } + + if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) { + return status.ErrorProto(cresp.Status) + } + return nil +} + +// StreamDesc describes the stream properties, whether the stream has +// a streaming client, a streaming server, or both +type StreamDesc struct { + StreamingClient bool + StreamingServer bool +} + +// ClientStream is used to send or recv messages on the underlying stream +type ClientStream interface { + CloseSend() error + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +type clientStream struct { + ctx context.Context + s *stream + c *Client + desc *StreamDesc + localClosed bool + remoteClosed bool +} + +func (cs *clientStream) CloseSend() error { + if !cs.desc.StreamingClient { + return fmt.Errorf("%w: cannot close non-streaming client", ErrProtocol) + } + if cs.localClosed { + return ErrStreamClosed + } + err := cs.s.send(messageTypeData, flagRemoteClosed|flagNoData, nil) + if err != nil { + return filterCloseErr(err) + } + cs.localClosed = true + return nil +} + +func (cs *clientStream) SendMsg(m interface{}) error { + if !cs.desc.StreamingClient { + return fmt.Errorf("%w: cannot send data from non-streaming client", ErrProtocol) + } + if cs.localClosed { + return ErrStreamClosed + } + + var ( + payload []byte + err error + ) + if m != nil { + payload, err = cs.c.codec.Marshal(m) + if err != 
nil { + return err + } + } + + err = cs.s.send(messageTypeData, 0, payload) + if err != nil { + return filterCloseErr(err) + } + + return nil +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.remoteClosed { + return io.EOF + } + + var msg *streamMessage + select { + case <-cs.ctx.Done(): + return cs.ctx.Err() + case <-cs.s.recvClose: + // If recv has a pending message, process that first + select { + case msg = <-cs.s.recv: + default: + return cs.s.recvErr + } + case msg = <-cs.s.recv: + } + + if msg.header.Type == messageTypeResponse { + resp := &Response{} + err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) + // return the payload buffer for reuse + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + + if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { + return err + } + + if resp.Status != nil && resp.Status.Code != int32(codes.OK) { + return status.ErrorProto(resp.Status) + } + + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + return nil + } else if msg.header.Type == messageTypeData { + if !cs.desc.StreamingServer { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) + } + if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + if msg.header.Flags&flagNoData == flagNoData { + return io.EOF + } + } + + err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + return nil + } + + return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) +} + +// Close closes the ttrpc connection and underlying connection +func (c *Client) Close() error { + c.closeOnce.Do(func() { + c.closed() + + c.conn.Close() + }) + return nil +} + +// UserOnCloseWait is used to block until the user's on-close callback +// finishes. 
+func (c *Client) UserOnCloseWait(ctx context.Context) error { + select { + case <-c.userCloseWaitCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (c *Client) run() { + err := c.receiveLoop() + c.Close() + c.cleanupStreams(err) + + c.userCloseFunc() + close(c.userCloseWaitCh) +} + +func (c *Client) receiveLoop() error { + for { + select { + case <-c.ctx.Done(): + return ErrClosed + default: + var ( + msg = &streamMessage{} + err error + ) + + msg.header, msg.payload, err = c.channel.recv() + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. + return filterCloseErr(err) + } + } + sid := streamID(msg.header.StreamID) + s := c.getStream(sid) + if s == nil { + log.G(c.ctx).WithField("stream", sid).Error("ttrpc: received message on inactive stream") + continue + } + + if err != nil { + s.closeWithError(err) + } else { + if err := s.receive(c.ctx, msg); err != nil { + log.G(c.ctx).WithFields(log.Fields{"error": err, "stream": sid}).Error("ttrpc: failed to handle message") + } + } + } + } +} + +// createStream creates a new stream and registers it with the client +// Introduce stream types for multiple or single response +func (c *Client) createStream(flags uint8, b []byte) (*stream, error) { + // sendLock must be held across both allocation of the stream ID and sending it across the wire. + // This ensures that new stream IDs sent on the wire are always increasing, which is a + // requirement of the TTRPC protocol. + // This use of sendLock could be split into another mutex that covers stream creation + first send, + // and just use sendLock to guard writing to the wire, but for now it seems simpler to have fewer mutexes. 
+ c.sendLock.Lock() + defer c.sendLock.Unlock() + + // Check if closed since lock acquired to prevent adding + // anything after cleanup completes + select { + case <-c.ctx.Done(): + return nil, ErrClosed + default: + } + + var s *stream + if err := func() error { + // In the future this could be replaced with a sync.Map instead of streamLock+map. + c.streamLock.Lock() + defer c.streamLock.Unlock() + + // Check if closed since lock acquired to prevent adding + // anything after cleanup completes + select { + case <-c.ctx.Done(): + return ErrClosed + default: + } + + s = newStream(c.nextStreamID, c) + c.streams[s.id] = s + c.nextStreamID = c.nextStreamID + 2 + + return nil + }(); err != nil { + return nil, err + } + + if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil { + return s, filterCloseErr(err) + } + + return s, nil +} + +func (c *Client) deleteStream(s *stream) { + c.streamLock.Lock() + delete(c.streams, s.id) + c.streamLock.Unlock() + s.closeWithError(nil) +} + +func (c *Client) getStream(sid streamID) *stream { + c.streamLock.RLock() + s := c.streams[sid] + c.streamLock.RUnlock() + return s +} + +func (c *Client) cleanupStreams(err error) { + c.streamLock.Lock() + defer c.streamLock.Unlock() + + for sid, s := range c.streams { + s.closeWithError(err) + delete(c.streams, sid) + } +} + +// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when +// returning from call or handling errors from main read loop. +// +// This purposely ignores errors with a wrapped cause. 
+func filterCloseErr(err error) error { + switch { + case err == nil: + return nil + case err == io.EOF: + return ErrClosed + case errors.Is(err, io.ErrClosedPipe): + return ErrClosed + case errors.Is(err, io.EOF): + return ErrClosed + case strings.Contains(err.Error(), "use of closed network connection"): + return ErrClosed + default: + // if we have an epipe on a write or econnreset on a read , we cast to errclosed + var oerr *net.OpError + if errors.As(err, &oerr) { + if (oerr.Op == "write" && errors.Is(err, syscall.EPIPE)) || + (oerr.Op == "read" && errors.Is(err, syscall.ECONNRESET)) { + return ErrClosed + } + } + } + + return err +} + +// NewStream creates a new stream with the given stream descriptor to the +// specified service and method. If not a streaming client, the request object +// may be provided. +func (c *Client) NewStream(ctx context.Context, desc *StreamDesc, service, method string, req interface{}) (ClientStream, error) { + var payload []byte + if req != nil { + var err error + payload, err = c.codec.Marshal(req) + if err != nil { + return nil, err + } + } + + request := &Request{ + Service: service, + Method: method, + Payload: payload, + // TODO: metadata from context + } + p, err := c.codec.Marshal(request) + if err != nil { + return nil, err + } + + var flags uint8 + if desc.StreamingClient { + flags = flagRemoteOpen + } else { + flags = flagRemoteClosed + } + s, err := c.createStream(flags, p) + if err != nil { + return nil, err + } + + return &clientStream{ + ctx: ctx, + s: s, + c: c, + desc: desc, + }, nil +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + p, err := c.codec.Marshal(req) + if err != nil { + return err + } + + s, err := c.createStream(0, p) + if err != nil { + return err + } + defer c.deleteStream(s) + + var msg *streamMessage + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.ctx.Done(): + return ErrClosed + case <-s.recvClose: + // If recv has a pending message, 
process that first + select { + case msg = <-s.recv: + default: + return s.recvErr + } + case msg = <-s.recv: + } + + if msg.header.Type == messageTypeResponse { + err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) + } else { + err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + } + + // return the payload buffer for reuse + c.channel.putmbuf(msg.payload) + + return err +} diff --git a/vendor/github.com/containerd/ttrpc/codec.go b/vendor/github.com/containerd/ttrpc/codec.go new file mode 100644 index 0000000000..3e82722a42 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/codec.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +type codec struct{} + +func (c codec) Marshal(msg interface{}) ([]byte, error) { + switch v := msg.(type) { + case proto.Message: + return proto.Marshal(v) + default: + return nil, fmt.Errorf("ttrpc: cannot marshal unknown type: %T", msg) + } +} + +func (c codec) Unmarshal(p []byte, msg interface{}) error { + switch v := msg.(type) { + case proto.Message: + return proto.Unmarshal(p, v) + default: + return fmt.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) + } +} diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go new file mode 100644 index 0000000000..f401f67be0 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/config.go @@ -0,0 +1,86 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "errors" +) + +type serverConfig struct { + handshaker Handshaker + interceptor UnaryServerInterceptor +} + +// ServerOpt for configuring a ttrpc server +type ServerOpt func(*serverConfig) error + +// WithServerHandshaker can be passed to NewServer to ensure that the +// handshaker is called before every connection attempt. +// +// Only one handshaker is allowed per server. 
+func WithServerHandshaker(handshaker Handshaker) ServerOpt { + return func(c *serverConfig) error { + if c.handshaker != nil { + return errors.New("only one handshaker allowed per server") + } + c.handshaker = handshaker + return nil + } +} + +// WithUnaryServerInterceptor sets the provided interceptor on the server +func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { + return func(c *serverConfig) error { + if c.interceptor != nil { + return errors.New("only one unchained interceptor allowed per server") + } + c.interceptor = i + return nil + } +} + +// WithChainUnaryServerInterceptor sets the provided chain of server interceptors +func WithChainUnaryServerInterceptor(interceptors ...UnaryServerInterceptor) ServerOpt { + return func(c *serverConfig) error { + if len(interceptors) == 0 { + return nil + } + if c.interceptor != nil { + interceptors = append([]UnaryServerInterceptor{c.interceptor}, interceptors...) + } + c.interceptor = func( + ctx context.Context, + unmarshal Unmarshaler, + info *UnaryServerInfo, + method Method) (interface{}, error) { + return interceptors[0](ctx, unmarshal, info, + chainUnaryServerInterceptors(info, method, interceptors[1:])) + } + return nil + } +} + +func chainUnaryServerInterceptors(info *UnaryServerInfo, method Method, interceptors []UnaryServerInterceptor) Method { + if len(interceptors) == 0 { + return method + } + return func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + return interceptors[0](ctx, unmarshal, info, + chainUnaryServerInterceptors(info, method, interceptors[1:])) + } +} diff --git a/vendor/github.com/containerd/ttrpc/doc.go b/vendor/github.com/containerd/ttrpc/doc.go new file mode 100644 index 0000000000..d80cd424cc --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/doc.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +package ttrpc defines and implements a low level simple transfer protocol +optimized for low latency and reliable connections between processes on the same +host. The protocol uses simple framing for sending requests, responses, and data +using multiple streams. +*/ +package ttrpc diff --git a/vendor/github.com/containerd/ttrpc/errors.go b/vendor/github.com/containerd/ttrpc/errors.go new file mode 100644 index 0000000000..632dbe8bdf --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/errors.go @@ -0,0 +1,80 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "errors" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + // ErrProtocol is a general error in the handling the protocol. + ErrProtocol = errors.New("protocol error") + + // ErrClosed is returned by client methods when the underlying connection is + // closed. 
+ ErrClosed = errors.New("ttrpc: closed") + + // ErrServerClosed is returned when the Server has closed its connection. + ErrServerClosed = errors.New("ttrpc: server closed") + + // ErrStreamClosed is when the streaming connection is closed. + ErrStreamClosed = errors.New("ttrpc: stream closed") +) + +// OversizedMessageErr is used to indicate refusal to send an oversized message. +// It wraps a ResourceExhausted grpc Status together with the offending message +// length. +type OversizedMessageErr struct { + messageLength int + err error +} + +// OversizedMessageError returns an OversizedMessageErr error for the given message +// length if it exceeds the allowed maximum. Otherwise a nil error is returned. +func OversizedMessageError(messageLength int) error { + if messageLength <= messageLengthMax { + return nil + } + + return &OversizedMessageErr{ + messageLength: messageLength, + err: status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", messageLength, messageLengthMax), + } +} + +// Error returns the error message for the corresponding grpc Status for the error. +func (e *OversizedMessageErr) Error() string { + return e.err.Error() +} + +// Unwrap returns the corresponding error with our grpc status code. +func (e *OversizedMessageErr) Unwrap() error { + return e.err +} + +// RejectedLength retrieves the rejected message length which triggered the error. +func (e *OversizedMessageErr) RejectedLength() int { + return e.messageLength +} + +// MaximumLength retrieves the maximum allowed message length that triggered the error. +func (*OversizedMessageErr) MaximumLength() int { + return messageLengthMax +} diff --git a/vendor/github.com/containerd/ttrpc/handshake.go b/vendor/github.com/containerd/ttrpc/handshake.go new file mode 100644 index 0000000000..3c6b610d35 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/handshake.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "net" +) + +// Handshaker defines the interface for connection handshakes performed on the +// server or client when first connecting. +type Handshaker interface { + // Handshake should confirm or decorate a connection that may be incoming + // to a server or outgoing from a client. + // + // If this returns without an error, the caller should use the connection + // in place of the original connection. + // + // The second return value can contain credential specific data, such as + // unix socket credentials or TLS information. + // + // While we currently only have implementations on the server-side, this + // interface should be sufficient to implement similar handshakes on the + // client-side. 
+ Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) +} + +type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) + +func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { + return fn(ctx, conn) +} + +func noopHandshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { + return conn, nil, nil +} diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go new file mode 100644 index 0000000000..7ff5e9d33f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/interceptor.go @@ -0,0 +1,65 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import "context" + +// UnaryServerInfo provides information about the server request +type UnaryServerInfo struct { + FullMethod string +} + +// UnaryClientInfo provides information about the client request +type UnaryClientInfo struct { + FullMethod string +} + +// StreamServerInfo provides information about the server request +type StreamServerInfo struct { + FullMethod string + StreamingClient bool + StreamingServer bool +} + +// Unmarshaler contains the server request data and allows it to be unmarshaled +// into a concrete type +type Unmarshaler func(interface{}) error + +// Invoker invokes the client's request and response from the ttrpc server +type Invoker func(context.Context, *Request, *Response) error + +// UnaryServerInterceptor specifies the interceptor function for server request/response +type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) + +// UnaryClientInterceptor specifies the interceptor function for client request/response +type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error + +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, _ *UnaryServerInfo, method Method) (interface{}, error) { + return method(ctx, unmarshal) +} + +func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { + return invoker(ctx, req, resp) +} + +type StreamServerInterceptor func(context.Context, StreamServer, *StreamServerInfo, StreamHandler) (interface{}, error) + +func defaultStreamServerInterceptor(ctx context.Context, ss StreamServer, _ *StreamServerInfo, stream StreamHandler) (interface{}, error) { + return stream(ctx, ss) +} + +type StreamClientInterceptor func(context.Context) diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go new file mode 100644 index 0000000000..6e00424874 --- 
/dev/null +++ b/vendor/github.com/containerd/ttrpc/metadata.go @@ -0,0 +1,135 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "strings" +) + +// MD is the user type for ttrpc metadata +type MD map[string][]string + +// Get returns the metadata for a given key when they exist. +// If there is no metadata, a nil slice and false are returned. +func (m MD) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + list, ok := m[key] + if !ok || len(list) == 0 { + return nil, false + } + + return list, true +} + +// Set sets the provided values for a given key. +// The values will overwrite any existing values. +// If no values provided, a key will be deleted. +func (m MD) Set(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + delete(m, key) + return + } + m[key] = values +} + +// Append appends additional values to the given key. +func (m MD) Append(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + return + } + current, ok := m[key] + if ok { + m.Set(key, append(current, values...)...) + } else { + m.Set(key, values...) + } +} + +// Clone returns a copy of MD or nil if it's nil. 
+// It's copied from golang's `http.Header.Clone` implementation: +// https://cs.opensource.google/go/go/+/refs/tags/go1.23.4:src/net/http/header.go;l=94 +func (m MD) Clone() MD { + if m == nil { + return nil + } + + // Find total number of values. + nv := 0 + for _, vv := range m { + nv += len(vv) + } + sv := make([]string, nv) // shared backing array for headers' values + m2 := make(MD, len(m)) + for k, vv := range m { + if vv == nil { + // Preserve nil values. + m2[k] = nil + continue + } + n := copy(sv, vv) + m2[k] = sv[:n:n] + sv = sv[n:] + } + return m2 +} + +func (m MD) setRequest(r *Request) { + for k, values := range m { + for _, v := range values { + r.Metadata = append(r.Metadata, &KeyValue{ + Key: k, + Value: v, + }) + } + } +} + +func (m MD) fromRequest(r *Request) { + for _, kv := range r.Metadata { + m[kv.Key] = append(m[kv.Key], kv.Value) + } +} + +type metadataKey struct{} + +// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) +func GetMetadata(ctx context.Context) (MD, bool) { + metadata, ok := ctx.Value(metadataKey{}).(MD) + return metadata, ok +} + +// GetMetadataValue gets a specific metadata value by name from context.Context +func GetMetadataValue(ctx context.Context, name string) (string, bool) { + metadata, ok := GetMetadata(ctx) + if !ok { + return "", false + } + + if list, ok := metadata.Get(name); ok { + return list[0], true + } + + return "", false +} + +// WithMetadata attaches metadata map to a context.Context +func WithMetadata(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, metadataKey{}, md) +} diff --git a/vendor/github.com/containerd/ttrpc/request.pb.go b/vendor/github.com/containerd/ttrpc/request.pb.go new file mode 100644 index 0000000000..3921ae5a35 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/request.pb.go @@ -0,0 +1,396 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/ttrpc/request.proto + +package ttrpc + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,json=timeoutNano,proto3" json:"timeout_nano,omitempty"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Request) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *Request) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Request) GetTimeoutNano() int64 { + if x != nil { + return x.TimeoutNano + } + return 0 +} + +func (x *Request) GetMetadata() []*KeyValue { + if x != nil { + return x.Metadata + } + return nil +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
+func (*Response) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{1} +} + +func (x *Response) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *Response) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type StringList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + List []string `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (x *StringList) Reset() { + *x = StringList{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringList) ProtoMessage() {} + +func (x *StringList) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringList.ProtoReflect.Descriptor instead. 
+func (*StringList) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{2} +} + +func (x *StringList) GetList() []string { + if x != nil { + return x.List + } + return nil +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. 
+func (*KeyValue) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_github_com_containerd_ttrpc_request_proto protoreflect.FileDescriptor + +var file_github_com_containerd_ttrpc_request_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x74, 0x72, + 0x70, 0x63, 0x1a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x45, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x20, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1d, 0x5a, 0x1b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_github_com_containerd_ttrpc_request_proto_rawDescOnce sync.Once + file_github_com_containerd_ttrpc_request_proto_rawDescData = file_github_com_containerd_ttrpc_request_proto_rawDesc +) + +func file_github_com_containerd_ttrpc_request_proto_rawDescGZIP() []byte { + file_github_com_containerd_ttrpc_request_proto_rawDescOnce.Do(func() { + file_github_com_containerd_ttrpc_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_ttrpc_request_proto_rawDescData) + }) + return file_github_com_containerd_ttrpc_request_proto_rawDescData +} + +var file_github_com_containerd_ttrpc_request_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_ttrpc_request_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: ttrpc.Request + (*Response)(nil), // 1: ttrpc.Response + (*StringList)(nil), // 2: 
ttrpc.StringList + (*KeyValue)(nil), // 3: ttrpc.KeyValue + (*status.Status)(nil), // 4: Status +} +var file_github_com_containerd_ttrpc_request_proto_depIdxs = []int32{ + 3, // 0: ttrpc.Request.metadata:type_name -> ttrpc.KeyValue + 4, // 1: ttrpc.Response.status:type_name -> Status + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_ttrpc_request_proto_init() } +func file_github_com_containerd_ttrpc_request_proto_init() { + if File_github_com_containerd_ttrpc_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_ttrpc_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_ttrpc_request_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_ttrpc_request_proto_goTypes, + DependencyIndexes: file_github_com_containerd_ttrpc_request_proto_depIdxs, + MessageInfos: file_github_com_containerd_ttrpc_request_proto_msgTypes, + }.Build() + File_github_com_containerd_ttrpc_request_proto = out.File + file_github_com_containerd_ttrpc_request_proto_rawDesc = nil + file_github_com_containerd_ttrpc_request_proto_goTypes = nil + file_github_com_containerd_ttrpc_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/ttrpc/request.proto b/vendor/github.com/containerd/ttrpc/request.proto new file mode 100644 index 0000000000..37da334fc2 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/request.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package ttrpc; + +import "proto/status.proto"; + +option go_package = "github.com/containerd/ttrpc"; + +message Request { + string service = 1; + string method = 2; + bytes payload = 3; + int64 timeout_nano = 4; + repeated KeyValue metadata = 5; +} + +message Response { + Status status = 1; + bytes payload = 2; +} + +message StringList { + repeated string list = 1; +} + +message KeyValue { + string key = 1; + string value = 2; +} diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go new file mode 100644 index 0000000000..bb71de677b --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -0,0 +1,586 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "errors" + "io" + "math/rand" + "net" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containerd/log" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Server struct { + config *serverConfig + services *serviceSet + codec codec + + mu sync.Mutex + listeners map[net.Listener]struct{} + connections map[*serverConn]struct{} // all connections to current state + done chan struct{} // marks point at which we stop serving requests +} + +func NewServer(opts ...ServerOpt) (*Server, error) { + config := &serverConfig{} + for _, opt := range opts { + if err := opt(config); err != nil { + return nil, err + } + } + if config.interceptor == nil { + config.interceptor = defaultServerInterceptor + } + + return &Server{ + config: config, + services: newServiceSet(config.interceptor), + done: make(chan struct{}), + listeners: make(map[net.Listener]struct{}), + connections: make(map[*serverConn]struct{}), + }, nil +} + +// Register registers a map of methods to method handlers +// TODO: Remove in 2.0, does not support streams +func (s *Server) Register(name string, methods map[string]Method) { + s.services.register(name, &ServiceDesc{Methods: methods}) +} + +func (s *Server) RegisterService(name string, desc *ServiceDesc) { + s.services.register(name, desc) +} + +func (s *Server) Serve(ctx context.Context, l net.Listener) error { + s.mu.Lock() + s.addListenerLocked(l) + defer s.closeListener(l) + + select { + case <-s.done: + s.mu.Unlock() + return ErrServerClosed + default: + } + s.mu.Unlock() + 
+ var ( + backoff time.Duration + handshaker = s.config.handshaker + ) + + if handshaker == nil { + handshaker = handshakerFunc(noopHandshake) + } + + for { + conn, err := l.Accept() + if err != nil { + select { + case <-s.done: + return ErrServerClosed + default: + } + + if terr, ok := err.(interface { + Temporary() bool + }); ok && terr.Temporary() { + if backoff == 0 { + backoff = time.Millisecond + } else { + backoff *= 2 + } + + if max := time.Second; backoff > max { + backoff = max + } + + sleep := time.Duration(rand.Int63n(int64(backoff))) + log.G(ctx).WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) + time.Sleep(sleep) + continue + } + + return err + } + + backoff = 0 + + approved, handshake, err := handshaker.Handshake(ctx, conn) + if err != nil { + log.G(ctx).WithError(err).Error("ttrpc: refusing connection after handshake") + conn.Close() + continue + } + + sc, err := s.newConn(approved, handshake) + if err != nil { + log.G(ctx).WithError(err).Error("ttrpc: create connection failed") + conn.Close() + continue + } + + go sc.run(ctx) + } +} + +func (s *Server) Shutdown(ctx context.Context) error { + s.mu.Lock() + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + lnerr := s.closeListeners() + s.mu.Unlock() + + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + s.closeIdleConns() + + if s.countConnection() == 0 { + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } + + return lnerr +} + +// Close the server without waiting for active connections. 
+func (s *Server) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + + err := s.closeListeners() + for c := range s.connections { + c.close() + delete(s.connections, c) + } + + return err +} + +func (s *Server) addListenerLocked(l net.Listener) { + s.listeners[l] = struct{}{} +} + +func (s *Server) closeListener(l net.Listener) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.closeListenerLocked(l) +} + +func (s *Server) closeListenerLocked(l net.Listener) error { + defer delete(s.listeners, l) + return l.Close() +} + +func (s *Server) closeListeners() error { + var err error + for l := range s.listeners { + if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { + err = cerr + } + } + return err +} + +func (s *Server) addConnection(c *serverConn) error { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + return ErrServerClosed + default: + } + + s.connections[c] = struct{}{} + return nil +} + +func (s *Server) delConnection(c *serverConn) { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.connections, c) +} + +func (s *Server) countConnection() int { + s.mu.Lock() + defer s.mu.Unlock() + + return len(s.connections) +} + +func (s *Server) closeIdleConns() { + s.mu.Lock() + defer s.mu.Unlock() + + for c := range s.connections { + if st, ok := c.getState(); !ok || st == connStateActive { + continue + } + c.close() + delete(s.connections, c) + } +} + +type connState int + +const ( + connStateActive = iota + 1 // outstanding requests + connStateIdle // no requests + connStateClosed // closed connection +) + +func (cs connState) String() string { + switch cs { + case connStateActive: + return "active" + case connStateIdle: + return "idle" + case connStateClosed: + return "closed" + default: + return "unknown" + } +} + +func (s *Server) newConn(conn net.Conn, handshake interface{}) (*serverConn, error) { + c := &serverConn{ + server: s, + conn: conn, 
+ handshake: handshake, + shutdown: make(chan struct{}), + } + c.setState(connStateIdle) + if err := s.addConnection(c); err != nil { + c.close() + return nil, err + } + return c, nil +} + +type serverConn struct { + server *Server + conn net.Conn + handshake interface{} // data from handshake, not used for now + state atomic.Value + + shutdownOnce sync.Once + shutdown chan struct{} // forced shutdown, used by close +} + +func (c *serverConn) getState() (connState, bool) { + cs, ok := c.state.Load().(connState) + return cs, ok +} + +func (c *serverConn) setState(newstate connState) { + c.state.Store(newstate) +} + +func (c *serverConn) close() error { + c.shutdownOnce.Do(func() { + close(c.shutdown) + }) + + return nil +} + +func (c *serverConn) run(sctx context.Context) { + type ( + response struct { + id uint32 + status *status.Status + data []byte + closeStream bool + streaming bool + } + ) + + var ( + ch = newChannel(c.conn) + ctx, cancel = context.WithCancel(sctx) + state connState = connStateIdle + responses = make(chan response) + recvErr = make(chan error, 1) + done = make(chan struct{}) + streams = sync.Map{} + active int32 + lastStreamID uint32 + ) + + defer c.conn.Close() + defer cancel() + defer close(done) + defer c.server.delConnection(c) + + sendStatus := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. 
+ id: id, + status: st, + closeStream: true, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false + } + } + + go func(recvErr chan error) { + defer close(recvErr) + for { + select { + case <-c.shutdown: + return + case <-done: + return + default: // proceed + } + + mh, p, err := ch.recv() + if err != nil { + status, ok := status.FromError(err) + if !ok { + recvErr <- err + return + } + + // in this case, we send an error for that particular message + // when the status is defined. + if !sendStatus(mh.StreamID, status) { + return + } + + continue + } + + if mh.StreamID%2 != 1 { + // enforce odd client initiated identifiers. + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + return + } + continue + } + + if mh.Type == messageTypeData { + i, ok := streams.Load(mh.StreamID) + if !ok { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) { + return + } + } + sh := i.(*streamHandler) + if mh.Flags&flagNoData != flagNoData { + unmarshal := func(obj interface{}) error { + err := protoUnmarshal(p, obj) + ch.putmbuf(p) + return err + } + + if err := sh.data(unmarshal); err != nil { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data handling error: %v", err)) { + return + } + } + } + + if mh.Flags&flagRemoteClosed == flagRemoteClosed { + sh.closeSend() + if len(p) > 0 { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data close message cannot include data")) { + return + } + } + } + } else if mh.Type == messageTypeRequest { + if mh.StreamID <= lastStreamID { + // enforce odd client initiated identifiers. + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID cannot be re-used and must increment")) { + return + } + continue + + } + lastStreamID = mh.StreamID + + // TODO: Make request type configurable + // Unmarshaller which takes in a byte array and returns an interface? 
+ var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + id := mh.StreamID + respond := func(status *status.Status, data []byte, streaming, closeStream bool) error { + select { + case responses <- response{ + id: id, + status: status, + data: data, + closeStream: closeStream, + streaming: streaming, + }: + case <-done: + return ErrClosed + } + return nil + } + sh, err := c.server.services.handle(ctx, &req, respond) + if err != nil { + status, _ := status.FromError(err) + if !sendStatus(mh.StreamID, status) { + return + } + continue + } + + streams.Store(id, sh) + atomic.AddInt32(&active, 1) + } + // TODO: else we must ignore this for future compat. log this? + } + }(recvErr) + + for { + var ( + newstate connState + shutdown chan struct{} + ) + + activeN := atomic.LoadInt32(&active) + if activeN > 0 { + newstate = connStateActive + shutdown = nil + } else { + newstate = connStateIdle + shutdown = c.shutdown // only enable this branch in idle mode + } + if newstate != state { + c.setState(newstate) + state = newstate + } + + select { + case response := <-responses: + if !response.streaming || response.status.Code() != codes.OK { + p, err := c.server.codec.Marshal(&Response{ + Status: response.status.Proto(), + Payload: response.data, + }) + if err != nil { + log.G(ctx).WithError(err).Error("failed marshaling response") + return + } + + if err := ch.send(response.id, messageTypeResponse, 0, p); err != nil { + log.G(ctx).WithError(err).Error("failed sending message on channel") + return + } + } else { + var flags uint8 + if response.closeStream { + flags = flagRemoteClosed + } + if response.data == nil { + flags = flags | flagNoData + } + if err := ch.send(response.id, messageTypeData, flags, response.data); err != nil { + log.G(ctx).WithError(err).Error("failed sending message on 
channel") + return + } + } + + if response.closeStream { + // The ttrpc protocol currently does not support the case where + // the server is localClosed but not remoteClosed. Once the server + // is closing, the whole stream may be considered finished + streams.Delete(response.id) + atomic.AddInt32(&active, -1) + } + case err := <-recvErr: + // TODO(stevvooe): Not wildly clear what we should do in this + // branch. Basically, it means that we are no longer receiving + // requests due to a terminal error. + recvErr = nil // connection is now "closing" + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET) { + // The client went away and we should stop processing + // requests, so that the client connection is closed + return + } + log.G(ctx).WithError(err).Error("error receiving message") + // else, initiate shutdown + case <-shutdown: + return + } + } +} + +var noopFunc = func() {} + +func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { + if len(req.Metadata) > 0 { + md := MD{} + md.fromRequest(req) + ctx = WithMetadata(ctx, md) + } + + cancel = noopFunc + if req.TimeoutNano == 0 { + return ctx, cancel + } + + ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano)) + return ctx, cancel +} diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go new file mode 100644 index 0000000000..6d092bf950 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -0,0 +1,279 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path" + "unsafe" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) + +type StreamHandler func(context.Context, StreamServer) (interface{}, error) + +type Stream struct { + Handler StreamHandler + StreamingClient bool + StreamingServer bool +} + +type ServiceDesc struct { + Methods map[string]Method + Streams map[string]Stream +} + +type serviceSet struct { + services map[string]*ServiceDesc + unaryInterceptor UnaryServerInterceptor + streamInterceptor StreamServerInterceptor +} + +func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { + return &serviceSet{ + services: make(map[string]*ServiceDesc), + unaryInterceptor: interceptor, + streamInterceptor: defaultStreamServerInterceptor, + } +} + +func (s *serviceSet) register(name string, desc *ServiceDesc) { + if _, ok := s.services[name]; ok { + panic(fmt.Errorf("duplicate service %v registered", name)) + } + + s.services[name] = desc +} + +func (s *serviceSet) unaryCall(ctx context.Context, method Method, info *UnaryServerInfo, data []byte) (p []byte, st *status.Status) { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(data, obj) + } + + resp, err := s.unaryInterceptor(ctx, unmarshal, info, method) + if err == nil { + if isNil(resp) { + err = errors.New("ttrpc: marshal called with nil") + } else { + p, err = protoMarshal(resp) + } + } + + st, ok := 
status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) + } + + return p, st +} + +func (s *serviceSet) streamCall(ctx context.Context, stream StreamHandler, info *StreamServerInfo, ss StreamServer) (p []byte, st *status.Status) { + resp, err := s.streamInterceptor(ctx, ss, info, stream) + if err == nil { + p, err = protoMarshal(resp) + } + st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) + } + return +} + +func (s *serviceSet) handle(ctx context.Context, req *Request, respond func(*status.Status, []byte, bool, bool) error) (*streamHandler, error) { + srv, ok := s.services[req.Service] + if !ok { + return nil, status.Errorf(codes.Unimplemented, "service %v", req.Service) + } + + if method, ok := srv.Methods[req.Method]; ok { + go func() { + ctx, cancel := getRequestContext(ctx, req) + defer cancel() + + info := &UnaryServerInfo{ + FullMethod: fullPath(req.Service, req.Method), + } + p, st := s.unaryCall(ctx, method, info, req.Payload) + + respond(st, p, false, true) + }() + return nil, nil + } + if stream, ok := srv.Streams[req.Method]; ok { + ctx, cancel := getRequestContext(ctx, req) + info := &StreamServerInfo{ + FullMethod: fullPath(req.Service, req.Method), + StreamingClient: stream.StreamingClient, + StreamingServer: stream.StreamingServer, + } + sh := &streamHandler{ + ctx: ctx, + respond: respond, + recv: make(chan Unmarshaler, 5), + info: info, + } + go func() { + defer cancel() + p, st := s.streamCall(ctx, stream.Handler, info, sh) + respond(st, p, stream.StreamingServer, true) + }() + + // Empty proto messages serialized to 0 payloads, + // so signatures like: rpc Stream(google.protobuf.Empty) returns (stream Data); + // don't get invoked here, which causes hang on client side. 
+ // See https://github.com/containerd/ttrpc/issues/126 + if req.Payload != nil || !info.StreamingClient { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(req.Payload, obj) + } + if err := sh.data(unmarshal); err != nil { + return nil, err + } + } + + return sh, nil + } + return nil, status.Errorf(codes.Unimplemented, "method %v", req.Method) +} + +type streamHandler struct { + ctx context.Context + respond func(*status.Status, []byte, bool, bool) error + recv chan Unmarshaler + info *StreamServerInfo + + remoteClosed bool + localClosed bool +} + +func (s *streamHandler) closeSend() { + if !s.remoteClosed { + s.remoteClosed = true + close(s.recv) + } +} + +func (s *streamHandler) data(unmarshal Unmarshaler) error { + if s.remoteClosed { + return ErrStreamClosed + } + select { + case s.recv <- unmarshal: + return nil + case <-s.ctx.Done(): + return s.ctx.Err() + } +} + +func (s *streamHandler) SendMsg(m interface{}) error { + if s.localClosed { + return ErrStreamClosed + } + p, err := protoMarshal(m) + if err != nil { + return err + } + return s.respond(nil, p, true, false) +} + +func (s *streamHandler) RecvMsg(m interface{}) error { + select { + case unmarshal, ok := <-s.recv: + if !ok { + return io.EOF + } + return unmarshal(m) + case <-s.ctx.Done(): + return s.ctx.Err() + + } +} + +func protoUnmarshal(p []byte, obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil +} + +func protoMarshal(obj interface{}) ([]byte, error) { + if obj == nil { + return nil, nil + } + + switch v := obj.(type) { + case proto.Message: + r, err := proto.Marshal(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) + } + + return 
r, nil + default: + return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) + } +} + +// convertCode maps stdlib go errors into grpc space. +// +// This is ripped from the grpc-go code base. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +func fullPath(service, method string) string { + return "/" + path.Join(service, method) +} + +func isNil(resp interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&resp))[1] == 0 +} diff --git a/vendor/github.com/containerd/ttrpc/stream.go b/vendor/github.com/containerd/ttrpc/stream.go new file mode 100644 index 0000000000..739a4c9675 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/stream.go @@ -0,0 +1,84 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import ( + "context" + "sync" +) + +type streamID uint32 + +type streamMessage struct { + header messageHeader + payload []byte +} + +type stream struct { + id streamID + sender sender + recv chan *streamMessage + + closeOnce sync.Once + recvErr error + recvClose chan struct{} +} + +func newStream(id streamID, send sender) *stream { + return &stream{ + id: id, + sender: send, + recv: make(chan *streamMessage, 1), + recvClose: make(chan struct{}), + } +} + +func (s *stream) closeWithError(err error) error { + s.closeOnce.Do(func() { + if err != nil { + s.recvErr = err + } else { + s.recvErr = ErrClosed + } + close(s.recvClose) + }) + return nil +} + +func (s *stream) send(mt messageType, flags uint8, b []byte) error { + return s.sender.send(uint32(s.id), mt, flags, b) +} + +func (s *stream) receive(ctx context.Context, msg *streamMessage) error { + select { + case <-s.recvClose: + return s.recvErr + default: + } + select { + case <-s.recvClose: + return s.recvErr + case s.recv <- msg: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +type sender interface { + send(uint32, messageType, uint8, []byte) error +} diff --git a/vendor/github.com/containerd/ttrpc/stream_server.go b/vendor/github.com/containerd/ttrpc/stream_server.go new file mode 100644 index 0000000000..b6d1ba720a --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/stream_server.go @@ -0,0 +1,22 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +type StreamServer interface { + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} diff --git a/vendor/github.com/containerd/ttrpc/test.proto b/vendor/github.com/containerd/ttrpc/test.proto new file mode 100644 index 0000000000..0e114d5568 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/test.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package ttrpc; + +option go_package = "github.com/containerd/ttrpc/internal"; + +message TestPayload { + string foo = 1; + int64 deadline = 2; + string metadata = 3; +} + +message EchoPayload { + int64 seq = 1; + string msg = 2; +} diff --git a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go new file mode 100644 index 0000000000..c82c9f9d4c --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go @@ -0,0 +1,105 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +type UnixCredentialsFunc func(*unix.Ucred) error + +func (fn UnixCredentialsFunc) Handshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { + uc, err := requireUnixSocket(conn) + if err != nil { + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: require unix socket: %w", err) + } + + rs, err := uc.SyscallConn() + if err != nil { + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed: %w", err) + } + var ( + ucred *unix.Ucred + ucredErr error + ) + if err := rs.Control(func(fd uintptr) { + ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED) + }); err != nil { + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed: %w", err) + } + + if ucredErr != nil { + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", ucredErr) + } + + if err := fn(ucred); err != nil { + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: credential check failed: %w", err) + } + + return uc, ucred, nil +} + +// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID. +// +// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an +// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001. +// So calling this function with uid=0 allows a connection from effective UID 0 but rejects +// a connection from effective UID 1001. +// +// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)." 
+func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc { + return func(ucred *unix.Ucred) error { + return requireUidGid(ucred, uid, gid) + } +} + +func UnixSocketRequireRoot() UnixCredentialsFunc { + return UnixSocketRequireUidGid(0, 0) +} + +// UnixSocketRequireSameUser resolves the current effective unix user and returns a +// UnixCredentialsFunc that will validate incoming unix connections against the +// current credentials. +// +// This is useful when using abstract sockets that are accessible by all users. +func UnixSocketRequireSameUser() UnixCredentialsFunc { + euid, egid := os.Geteuid(), os.Getegid() + return UnixSocketRequireUidGid(euid, egid) +} + +func requireUidGid(ucred *unix.Ucred, uid, gid int) error { + if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { + return fmt.Errorf("ttrpc: invalid credentials: %v", syscall.EPERM) + } + return nil +} + +func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) { + uc, ok := conn.(*net.UnixConn) + if !ok { + return nil, errors.New("a unix socket connection is required") + } + + return uc, nil +} diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index 0d82a2dd3c..6ac26949e6 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -15,7 +15,7 @@ package libcni // Note this is the actual implementation of the CNI specification, which -// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// is reflected in the SPEC.md file. // it is typically bundled into runtime providers (i.e. containerd or cri-o would use this // before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, // to add an IP to a container, to parse the configuration of the CNI and so on. 
@@ -23,10 +23,11 @@ package libcni import ( "context" "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" + "sort" "strings" "github.com/containernetworking/cni/pkg/invoke" @@ -38,6 +39,8 @@ import ( var ( CacheDir = "/var/lib/cni" + // slightly awkward wording to preserve anyone matching on error strings + ErrorCheckNotSupp = fmt.Errorf("does not support the CHECK command") ) const ( @@ -64,17 +67,37 @@ type RuntimeConf struct { CacheDir string } -type NetworkConfig struct { - Network *types.NetConf +// Use PluginConfig instead of NetworkConfig, the NetworkConfig +// backwards-compat alias will be removed in a future release. +type NetworkConfig = PluginConfig + +type PluginConfig struct { + Network *types.PluginConf Bytes []byte } type NetworkConfigList struct { - Name string - CNIVersion string - DisableCheck bool - Plugins []*NetworkConfig - Bytes []byte + Name string + CNIVersion string + DisableCheck bool + DisableGC bool + LoadOnlyInlinedPlugins bool + Plugins []*PluginConfig + Bytes []byte +} + +type NetworkAttachment struct { + ContainerID string + Network string + IfName string + Config []byte + NetNS string + CniArgs [][2]string + CapabilityArgs map[string]interface{} +} + +type GCArgs struct { + ValidAttachments []types.GCAttachment } type CNI interface { @@ -84,14 +107,21 @@ type CNI interface { GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) - AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + AddNetwork(ctx context.Context, net 
*PluginConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *PluginConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *PluginConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) - ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) + ValidateNetwork(ctx context.Context, net *PluginConfig) ([]string, error) + + GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error + GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error + + GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) + + GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) } type CNIConfig struct { @@ -122,7 +152,7 @@ func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) } } -func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { +func buildOneConfig(name, cniVersion string, orig *PluginConfig, prevResult types.Result, rt *RuntimeConf) (*PluginConfig, error) { var err error inject := map[string]interface{}{ @@ -139,8 +169,11 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ if err != nil { return nil, err } + if rt != nil { + return injectRuntimeConfig(orig, rt) + } - return injectRuntimeConfig(orig, rt) + return orig, nil } // This function takes a libcni RuntimeConf structure and injects values into @@ -155,7 +188,7 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ // capabilities include "portMappings", and the CapabilityArgs map includes a // "portMappings" key, that key and its value are added to the "runtimeConfig" // dictionary to be 
passed to the plugin's stdin. -func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { +func injectRuntimeConfig(orig *PluginConfig, rt *RuntimeConf) (*PluginConfig, error) { var err error rc := make(map[string]interface{}) @@ -195,6 +228,7 @@ type cachedInfo struct { Config []byte `json:"config"` IfName string `json:"ifName"` NetworkName string `json:"networkName"` + NetNS string `json:"netns,omitempty"` CniArgs [][2]string `json:"cniArgs,omitempty"` CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` RawResult map[string]interface{} `json:"result,omitempty"` @@ -229,6 +263,7 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, Config: config, IfName: rt.IfName, NetworkName: netName, + NetNS: rt.NetNS, CniArgs: rt.Args, CapabilityArgs: rt.CapabilityArgs, } @@ -254,11 +289,11 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil { return err } - return ioutil.WriteFile(fname, newBytes, 0600) + return os.WriteFile(fname, newBytes, 0o600) } func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { @@ -277,7 +312,7 @@ func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *R if err != nil { return nil, nil, err } - bytes, err = ioutil.ReadFile(fname) + bytes, err = os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil, nil @@ -305,7 +340,7 @@ func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *Runtim if err != nil { return nil, err } - data, err := ioutil.ReadFile(fname) + data, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ -333,7 +368,7 @@ func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt 
*RuntimeConf) if err != nil { return nil, err } - fdata, err := ioutil.ReadFile(fname) + fdata, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ -374,7 +409,7 @@ func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *Runt // GetNetworkCachedResult returns the cached Result of the previous // AddNetwork() operation for a network, or an error. -func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { +func (c *CNIConfig) GetNetworkCachedResult(net *PluginConfig, rt *RuntimeConf) (types.Result, error) { return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) } @@ -386,11 +421,73 @@ func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *Runt // GetNetworkCachedConfig copies the input RuntimeConf to output // RuntimeConf with fields updated with info from the cached Config. -func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { +func (c *CNIConfig) GetNetworkCachedConfig(net *PluginConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { return c.getCachedConfig(net.Network.Name, rt) } -func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { +// GetCachedAttachments returns a list of network attachments from the cache. +// The returned list will be filtered by the containerID if the value is not empty. 
+func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) { + dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results") + entries, err := os.ReadDir(dirPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + fileNames := make([]string, 0, len(entries)) + for _, e := range entries { + fileNames = append(fileNames, e.Name()) + } + sort.Strings(fileNames) + + attachments := []*NetworkAttachment{} + for _, fname := range fileNames { + if len(containerID) > 0 { + part := fmt.Sprintf("-%s-", containerID) + pos := strings.Index(fname, part) + if pos <= 0 || pos+len(part) >= len(fname) { + continue + } + } + + cacheFile := filepath.Join(dirPath, fname) + bytes, err := os.ReadFile(cacheFile) + if err != nil { + continue + } + + cachedInfo := cachedInfo{} + + if err := json.Unmarshal(bytes, &cachedInfo); err != nil { + continue + } + if cachedInfo.Kind != CNICacheV1 { + continue + } + if len(containerID) > 0 && cachedInfo.ContainerID != containerID { + continue + } + if cachedInfo.IfName == "" || cachedInfo.NetworkName == "" { + continue + } + + attachments = append(attachments, &NetworkAttachment{ + ContainerID: cachedInfo.ContainerID, + Network: cachedInfo.NetworkName, + IfName: cachedInfo.IfName, + Config: cachedInfo.Config, + NetNS: cachedInfo.NetNS, + CniArgs: cachedInfo.CniArgs, + CapabilityArgs: cachedInfo.CapabilityArgs, + }) + } + return attachments, nil +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { c.ensureExec() pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) if err != nil { @@ -432,7 +529,7 @@ func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, return result, nil } -func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { 
+func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) error { c.ensureExec() pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) if err != nil { @@ -453,7 +550,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + return fmt.Errorf("configuration version %q %w", list.CNIVersion, ErrorCheckNotSupp) } if list.DisableCheck { @@ -474,7 +571,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis return nil } -func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *PluginConfig, prevResult types.Result, rt *RuntimeConf) error { c.ensureExec() pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) if err != nil { @@ -497,9 +594,9 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if gtet { - cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + if cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt); err != nil { + _ = c.cacheDel(list.Name, rt) + cachedResult = nil } } @@ -509,12 +606,13 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) } } + _ = c.cacheDel(list.Name, rt) return nil } -func pluginDescription(net *types.NetConf) string { 
+func pluginDescription(net *types.PluginConf) string { if net == nil { return "" } @@ -528,7 +626,7 @@ func pluginDescription(net *types.NetConf) string { } // AddNetwork executes the plugin with the ADD command -func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { +func (c *CNIConfig) AddNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) (types.Result, error) { result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) if err != nil { return nil, err @@ -542,12 +640,12 @@ func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *Runt } // CheckNetwork executes the plugin with the CHECK command -func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error { // CHECK was added in CNI spec version 0.4.0 and higher if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + return fmt.Errorf("configuration version %q %w", net.Network.CNIVersion, ErrorCheckNotSupp) } cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) @@ -558,7 +656,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru } // DelNetwork executes the plugin with the DEL command -func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { +func (c *CNIConfig) DelNetwork(ctx context.Context, net *PluginConfig, rt *RuntimeConf) error { var cachedResult types.Result // Cached result on DEL was added in CNI spec version 0.4.0 and higher @@ -618,7 +716,7 @@ func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfig // ValidateNetwork checks that a configuration is 
reasonably valid. // It uses the same logic as ValidateNetworkList) // Returns a list of capabilities -func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *PluginConfig) ([]string, error) { caps := []string{} for c, ok := range net.Network.Capabilities { if ok { @@ -666,6 +764,129 @@ func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (vers return invoke.GetVersionInfo(ctx, pluginPath, c.exec) } +// GCNetworkList will do two things +// - dump the list of cached attachments, and issue deletes as necessary +// - issue a GC to the underlying plugins (if the version is high enough) +func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error { + // If DisableGC is set, then don't bother GCing at all. + if list.DisableGC { + return nil + } + + // First, get the list of cached attachments + cachedAttachments, err := c.GetCachedAttachments("") + if err != nil { + return nil + } + + var validAttachments map[types.GCAttachment]interface{} + if args != nil { + validAttachments = make(map[types.GCAttachment]interface{}, len(args.ValidAttachments)) + for _, a := range args.ValidAttachments { + validAttachments[a] = nil + } + } + + var errs []error + + for _, cachedAttachment := range cachedAttachments { + if cachedAttachment.Network != list.Name { + continue + } + // we found this attachment + gca := types.GCAttachment{ + ContainerID: cachedAttachment.ContainerID, + IfName: cachedAttachment.IfName, + } + if _, ok := validAttachments[gca]; ok { + continue + } + // otherwise, this attachment wasn't valid and we should issue a CNI DEL + rt := RuntimeConf{ + ContainerID: cachedAttachment.ContainerID, + NetNS: cachedAttachment.NetNS, + IfName: cachedAttachment.IfName, + Args: cachedAttachment.CniArgs, + CapabilityArgs: cachedAttachment.CapabilityArgs, + } + if err := c.DelNetworkList(ctx, list, &rt); err != nil { + 
errs = append(errs, fmt.Errorf("failed to delete stale attachment %s %s: %w", rt.ContainerID, rt.IfName, err)) + } + } + + // now, if the version supports it, issue a GC + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt { + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + if args != nil { + inject["cni.dev/valid-attachments"] = args.ValidAttachments + // #1101: spec used incorrect variable name + inject["cni.dev/attachments"] = args.ValidAttachments + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate configuration to GC plugin %s: %w", plugin.Network.Type, err)) + } + if err := c.gcNetwork(ctx, pluginConfig); err != nil { + errs = append(errs, fmt.Errorf("failed to GC plugin %s: %w", plugin.Network.Type, err)) + } + } + } + + return errors.Join(errs...) +} + +func (c *CNIConfig) gcNetwork(ctx context.Context, net *PluginConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("GC", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + +func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfigList) error { + // If the version doesn't support status, abort. 
+ if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); !gt { + return nil + } + + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + return fmt.Errorf("failed to generate configuration to get plugin STATUS %s: %w", plugin.Network.Type, err) + } + if err := c.getStatusNetwork(ctx, pluginConfig); err != nil { + return err // Don't collect errors here, so we return a clean error code. + } + } + return nil +} + +func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *PluginConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("STATUS", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + // ===== func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { return &invoke.Args{ diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index 3cd6a59d1c..7f8482e75e 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -16,13 +16,16 @@ package libcni import ( "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" + "slices" "sort" + "strings" "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" ) type NotFoundError struct { @@ -42,9 +45,16 @@ func (e NoConfigsFoundError) Error() string { return fmt.Sprintf(`no net configurations found in %s`, e.Dir) } -func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { - conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} - if err := json.Unmarshal(bytes, conf.Network); err != nil { +// This will not validate that the plugins actually belong to the netconfig by 
ensuring +// that they are loaded from a directory named after the networkName, relative to the network config. +// +// Since here we are just accepting raw bytes, the caller is responsible for ensuring that the plugin +// config provided here actually "belongs" to the networkconfig in question. +func NetworkPluginConfFromBytes(pluginConfBytes []byte) (*PluginConfig, error) { + // TODO why are we creating a struct that holds both the byte representation and the deserialized + // representation, and returning that, instead of just returning the deserialized representation? + conf := &PluginConfig{Bytes: pluginConfBytes, Network: &types.PluginConf{}} + if err := json.Unmarshal(pluginConfBytes, conf.Network); err != nil { return nil, fmt.Errorf("error parsing configuration: %w", err) } if conf.Network.Type == "" { @@ -53,17 +63,35 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { return conf, nil } -func ConfFromFile(filename string) (*NetworkConfig, error) { - bytes, err := ioutil.ReadFile(filename) +// Given a path to a directory containing a network configuration, and the name of a network, +// loads all plugin definitions found at path `networkConfPath/networkName/*.conf` +func NetworkPluginConfsFromFiles(networkConfPath, networkName string) ([]*PluginConfig, error) { + var pConfs []*PluginConfig + + pluginConfPath := filepath.Join(networkConfPath, networkName) + + pluginConfFiles, err := ConfFiles(pluginConfPath, []string{".conf"}) if err != nil { - return nil, fmt.Errorf("error reading %s: %w", filename, err) + return nil, fmt.Errorf("failed to read plugin config files in %s: %w", pluginConfPath, err) } - return ConfFromBytes(bytes) + + for _, pluginConfFile := range pluginConfFiles { + pluginConfBytes, err := os.ReadFile(pluginConfFile) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", pluginConfFile, err) + } + pluginConf, err := NetworkPluginConfFromBytes(pluginConfBytes) + if err != nil { + return nil, err + } + pConfs = 
append(pConfs, pluginConf) + } + return pConfs, nil } -func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { +func NetworkConfFromBytes(confBytes []byte) (*NetworkConfigList, error) { rawList := make(map[string]interface{}) - if err := json.Unmarshal(bytes, &rawList); err != nil { + if err := json.Unmarshal(confBytes, &rawList); err != nil { return nil, fmt.Errorf("error parsing configuration list: %w", err) } @@ -85,26 +113,115 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } } - disableCheck := false - if rawDisableCheck, ok := rawList["disableCheck"]; ok { - disableCheck, ok = rawDisableCheck.(bool) + rawVersions, ok := rawList["cniVersions"] + if ok { + // Parse the current package CNI version + rvs, ok := rawVersions.([]interface{}) if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs) } + vs := make([]string, 0, len(rvs)) + for i, rv := range rvs { + v, ok := rv.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv) + } + gt, err := version.GreaterThan(v, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err) + } else if !gt { + // Skip versions "greater" than this implementation of the spec + vs = append(vs, v) + } + } + + // if cniVersion was already set, append it to the list for sorting. 
+ if cniVersion != "" { + gt, err := version.GreaterThan(cniVersion, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err) + } else if !gt { + // ignore any versions higher than the current implemented spec version + vs = append(vs, cniVersion) + } + } + slices.SortFunc[[]string](vs, func(v1, v2 string) int { + if v1 == v2 { + return 0 + } + if gt, _ := version.GreaterThan(v1, v2); gt { + return 1 + } + return -1 + }) + if len(vs) > 0 { + cniVersion = vs[len(vs)-1] + } + } + + readBool := func(key string) (bool, error) { + rawVal, ok := rawList[key] + if !ok { + return false, nil + } + if b, ok := rawVal.(bool); ok { + return b, nil + } + + s, ok := rawVal.(string) + if !ok { + return false, fmt.Errorf("error parsing configuration list: invalid type %T for %s", rawVal, key) + } + s = strings.ToLower(s) + switch s { + case "false": + return false, nil + case "true": + return true, nil + } + return false, fmt.Errorf("error parsing configuration list: invalid value %q for %s", s, key) + } + + disableCheck, err := readBool("disableCheck") + if err != nil { + return nil, err + } + + disableGC, err := readBool("disableGC") + if err != nil { + return nil, err + } + + loadOnlyInlinedPlugins, err := readBool("loadOnlyInlinedPlugins") + if err != nil { + return nil, err } list := &NetworkConfigList{ - Name: name, - DisableCheck: disableCheck, - CNIVersion: cniVersion, - Bytes: bytes, + Name: name, + DisableCheck: disableCheck, + DisableGC: disableGC, + LoadOnlyInlinedPlugins: loadOnlyInlinedPlugins, + CNIVersion: cniVersion, + Bytes: confBytes, } var plugins []interface{} plug, ok := rawList["plugins"] - if !ok { - return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + // We can have a `plugins` list key in the main conf, + // We can also have `loadOnlyInlinedPlugins == true` + // + // If `plugins` is there, then `loadOnlyInlinedPlugins` can be true + // + // If 
plugins is NOT there, then `loadOnlyInlinedPlugins` cannot be true + // + // We have to have at least some plugins. + if !ok && loadOnlyInlinedPlugins { + return nil, fmt.Errorf("error parsing configuration list: `loadOnlyInlinedPlugins` is true, and no 'plugins' key") + } else if !ok && !loadOnlyInlinedPlugins { + return list, nil } + plugins, ok = plug.([]interface{}) if !ok { return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) @@ -124,24 +241,68 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } list.Plugins = append(list.Plugins, netConf) } - return list, nil } -func ConfListFromFile(filename string) (*NetworkConfigList, error) { - bytes, err := ioutil.ReadFile(filename) +func NetworkConfFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + + conf, err := NetworkConfFromBytes(bytes) + if err != nil { + return nil, err + } + + if !conf.LoadOnlyInlinedPlugins { + plugins, err := NetworkPluginConfsFromFiles(filepath.Dir(filename), conf.Name) + if err != nil { + return nil, err + } + conf.Plugins = append(conf.Plugins, plugins...) 
+ } + + if len(conf.Plugins) == 0 { + // Having 0 plugins for a given network is not necessarily a problem, + // but return as error for caller to decide, since they tried to load + return nil, fmt.Errorf("no plugin configs found") + } + return conf, nil +} + +// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + return NetworkPluginConfFromBytes(bytes) +} + +// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("error reading %s: %w", filename, err) } - return ConfListFromBytes(bytes) + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + return NetworkConfFromBytes(bytes) +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + return NetworkConfFromFile(filename) } +// ConfFiles simply returns a slice of all files in the provided directory +// with extensions matching the provided set. func ConfFiles(dir string, extensions []string) ([]string, error) { // In part, adapted from rkt/networking/podenv.go#listFiles - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) switch { case err == nil: // break case os.IsNotExist(err): + // If folder not there, return no error - only return an + // error if we cannot read contents or there are no contents. 
return nil, nil default: return nil, err @@ -162,6 +323,7 @@ func ConfFiles(dir string, extensions []string) ([]string, error) { return confFiles, nil } +// Deprecated: This file format is no longer supported, use NetworkConfXXX and NetworkPluginXXX functions func LoadConf(dir, name string) (*NetworkConfig, error) { files, err := ConfFiles(dir, []string{".conf", ".json"}) switch { @@ -185,6 +347,15 @@ func LoadConf(dir, name string) (*NetworkConfig, error) { } func LoadConfList(dir, name string) (*NetworkConfigList, error) { + return LoadNetworkConf(dir, name) +} + +// LoadNetworkConf looks at all the network configs in a given dir, +// loads and parses them all, and returns the first one with an extension of `.conf` +// that matches the provided network name predicate. +func LoadNetworkConf(dir, name string) (*NetworkConfigList, error) { + // TODO this .conflist/.conf extension thing is confusing and inexact + // for implementors. We should pick one extension for everything and stick with it. files, err := ConfFiles(dir, []string{".conflist"}) if err != nil { return nil, err @@ -192,7 +363,7 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) { sort.Strings(files) for _, confFile := range files { - conf, err := ConfListFromFile(confFile) + conf, err := NetworkConfFromFile(confFile) if err != nil { return nil, err } @@ -201,12 +372,13 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) { } } - // Try and load a network configuration file (instead of list) + // Deprecated: Try and load a network configuration file (instead of list) // from the same name, then upconvert. 
singleConf, err := LoadConf(dir, name) if err != nil { // A little extra logic so the error makes sense - if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + var ncfErr NoConfigsFoundError + if len(files) != 0 && errors.As(err, &ncfErr) { // Config lists found but no config files found return nil, NotFoundError{dir, name} } @@ -216,7 +388,8 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) { return ConfListFromConf(singleConf) } -func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { +// InjectConf takes a PluginConfig and inserts additional values into it, ensuring the result is serializable. +func InjectConf(original *PluginConfig, newValues map[string]interface{}) (*PluginConfig, error) { config := make(map[string]interface{}) err := json.Unmarshal(original.Bytes, &config) if err != nil { @@ -240,12 +413,14 @@ func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*Net return nil, err } - return ConfFromBytes(newBytes) + return NetworkPluginConfFromBytes(newBytes) } // ConfListFromConf "upconverts" a network config in to a NetworkConfigList, // with the single network as the only entry in the list. -func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { +// +// Deprecated: Non-conflist file formats are unsupported, use NetworkConfXXX and NetworkPluginXXX functions +func ConfListFromConf(original *PluginConfig) (*NetworkConfigList, error) { // Re-deserialize the config's json, then make a raw map configlist. // This may seem a bit strange, but it's to make the Bytes fields // actually make sense. 
Otherwise, the generated json is littered with diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go index 8defe4dd39..c8b548e7c6 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -51,25 +51,34 @@ func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exe // DelegateCheck calls the given delegate plugin with the CNI CHECK action and // JSON configuration func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "CHECK") +} + +func delegateNoResult(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec, verb string) error { pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) if err != nil { return err } - // DelegateCheck will override the original CNI_COMMAND env from process with CHECK - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs(verb), realExec) } // DelegateDel calls the given delegate plugin with the CNI DEL action and // JSON configuration func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "DEL") +} - // DelegateDel will override the original CNI_COMMAND env from process with DEL - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +// DelegateStatus calls the given delegate plugin with the CNI STATUS action and +// JSON configuration +func DelegateStatus(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return 
delegateNoResult(ctx, delegatePlugin, netconf, exec, "STATUS") +} + +// DelegateGC calls the given delegate plugin with the CNI GC action and +// JSON configuration +func DelegateGC(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "GC") } // return CNIArgs used by delegation diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index 3ad07aa8f2..a5e015fc92 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -81,17 +81,17 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // object to ExecPluginWithResult() to verify the incoming stdin and environment // and provide a tailored response: // -//import ( +// import ( // "encoding/json" // "path" // "strings" -//) +// ) // -//type fakeExec struct { +// type fakeExec struct { // version.PluginDecoder -//} +// } // -//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { // net := &types.NetConf{} // err := json.Unmarshal(stdinData, net) // if err != nil { @@ -109,14 +109,14 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // } // } // return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil -//} +// } // -//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { // if len(paths) > 0 { // return path.Join(paths[0], plugin), nil // } // return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) -//} +// } func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { if exec 
== nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go index 9bcfb45536..ed0999bd0e 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package invoke diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go new file mode 100644 index 0000000000..cffe136178 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ns + +import "github.com/containernetworking/cni/pkg/types" + +func CheckNetNS(nsPath string) (bool, *types.Error) { + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go new file mode 100644 index 0000000000..3d58e75d6c --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go @@ -0,0 +1,50 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "runtime" + + "github.com/vishvananda/netns" + + "github.com/containernetworking/cni/pkg/types" +) + +// Returns an object representing the current OS thread's network namespace +func getCurrentNS() (netns.NsHandle, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return netns.Get() +} + +func CheckNetNS(nsPath string) (bool, *types.Error) { + ns, err := netns.GetFromPath(nsPath) + // Let plugins check whether nsPath from args is valid. Also support CNI DEL for empty nsPath as already-deleted nsPath. 
+ if err != nil { + return false, nil + } + defer ns.Close() + + pluginNS, err := getCurrentNS() + if err != nil { + return false, types.NewError(types.ErrInvalidNetNS, "get plugin's netns failed", "") + } + defer pluginNS.Close() + + return pluginNS.Equal(ns), nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go new file mode 100644 index 0000000000..cffe136178 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go @@ -0,0 +1,21 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ns + +import "github.com/containernetworking/cni/pkg/types" + +func CheckNetNS(nsPath string) (bool, *types.Error) { + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go index cb8781972d..f29cf34594 100644 --- a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go +++ b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go @@ -19,13 +19,14 @@ package skel import ( "bytes" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "log" "os" "strings" + "github.com/containernetworking/cni/pkg/ns" "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/utils" "github.com/containernetworking/cni/pkg/version" @@ -34,12 +35,13 @@ import ( // CmdArgs captures all the arguments passed in to the plugin // via both env vars and stdin type CmdArgs struct { - ContainerID string - Netns string - IfName string - Args string - Path string - StdinData []byte + ContainerID string + Netns string + IfName string + Args string + Path string + NetnsOverride string + StdinData []byte } type dispatcher struct { @@ -55,21 +57,25 @@ type dispatcher struct { type reqForCmdEntry map[string]bool func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { - var cmd, contID, netns, ifName, args, path string + var cmd, contID, netns, ifName, args, path, netnsOverride string vars := []struct { - name string - val *string - reqForCmd reqForCmdEntry + name string + val *string + reqForCmd reqForCmdEntry + validateFn func(string) *types.Error }{ { "CNI_COMMAND", &cmd, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, }, + nil, }, { "CNI_CONTAINERID", @@ -79,6 +85,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": true, }, + utils.ValidateContainerID, }, { "CNI_NETNS", @@ -88,6 +95,7 @@ func (t 
*dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": false, }, + nil, }, { "CNI_IFNAME", @@ -97,6 +105,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": true, }, + utils.ValidateInterfaceName, }, { "CNI_ARGS", @@ -106,15 +115,29 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": false, "DEL": false, }, + nil, }, { "CNI_PATH", &path, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, + }, + nil, + }, + { + "CNI_NETNS_OVERRIDE", + &netnsOverride, + reqForCmdEntry{ + "ADD": false, + "CHECK": false, + "DEL": false, }, + nil, }, } @@ -125,6 +148,10 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { if v.reqForCmd[cmd] || v.name == "CNI_COMMAND" { argsMissing = append(argsMissing, v.name) } + } else if v.reqForCmd[cmd] && v.validateFn != nil { + if err := v.validateFn(*v.val); err != nil { + return "", nil, err + } } } @@ -137,18 +164,25 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { t.Stdin = bytes.NewReader(nil) } - stdinData, err := ioutil.ReadAll(t.Stdin) + stdinData, err := io.ReadAll(t.Stdin) if err != nil { return "", nil, types.NewError(types.ErrIOFailure, fmt.Sprintf("error reading from stdin: %v", err), "") } + if cmd != "VERSION" { + if err := validateConfig(stdinData); err != nil { + return "", nil, err + } + } + cmdArgs := &CmdArgs{ - ContainerID: contID, - Netns: netns, - IfName: ifName, - Args: args, - Path: path, - StdinData: stdinData, + ContainerID: contID, + Netns: netns, + IfName: ifName, + Args: args, + Path: path, + StdinData: stdinData, + NetnsOverride: netnsOverride, } return cmd, cmdArgs, nil } @@ -163,8 +197,13 @@ func (t *dispatcher) checkVersionAndCall(cmdArgs *CmdArgs, pluginVersionInfo ver return types.NewError(types.ErrIncompatibleCNIVersion, "incompatible CNI 
versions", verErr.Details()) } + if toCall == nil { + return nil + } + if err = toCall(cmdArgs); err != nil { - if e, ok := err.(*types.Error); ok { + var e *types.Error + if errors.As(err, &e) { // don't wrap Error in Error return e } @@ -190,7 +229,7 @@ func validateConfig(jsonBytes []byte) *types.Error { return nil } -func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { +func (t *dispatcher) pluginMain(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { cmd, cmdArgs, err := t.getCmdArgsFromEnv() if err != nil { // Print the about string to stderr when no command is set @@ -202,21 +241,20 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, return err } - if cmd != "VERSION" { - if err = validateConfig(cmdArgs.StdinData); err != nil { - return err - } - if err = utils.ValidateContainerID(cmdArgs.ContainerID); err != nil { + switch cmd { + case "ADD": + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Add) + if err != nil { return err } - if err = utils.ValidateInterfaceName(cmdArgs.IfName); err != nil { - return err + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "") + } } - } - - switch cmd { - case "ADD": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdAdd) case "CHECK": configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) if err != nil { @@ -232,7 +270,7 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, if err != nil { return types.NewError(types.ErrDecodingFailure, err.Error(), "") } else if gtet { - if err := t.checkVersionAndCall(cmdArgs, versionInfo, cmdCheck); err != nil { + 
if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Check); err != nil { return err } return nil @@ -240,7 +278,62 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, } return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow CHECK", "") case "DEL": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdDel) + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Del) + if err != nil { + return err + } + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "") + } + } + case "GC": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow GC", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.GC); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow GC", "") + case "STATUS": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return 
types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow STATUS", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Status); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow STATUS", "") case "VERSION": if err := versionInfo.Encode(t.Stdout); err != nil { return types.NewError(types.ErrIOFailure, err.Error(), "") @@ -264,13 +357,63 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, // // To let this package automatically handle errors and call os.Exit(1) for you, // use PluginMain() instead. +// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncsWithError instead. func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { + return PluginMainFuncsWithError(CNIFuncs{Add: cmdAdd, Check: cmdCheck, Del: cmdDel}, versionInfo, about) +} + +// CNIFuncs contains a group of callback command funcs to be passed in as +// parameters to the core "main" for a plugin. +type CNIFuncs struct { + Add func(_ *CmdArgs) error + Del func(_ *CmdArgs) error + Check func(_ *CmdArgs) error + GC func(_ *CmdArgs) error + Status func(_ *CmdArgs) error +} + +// PluginMainFuncsWithError is the core "main" for a plugin. It accepts +// callback functions defined within CNIFuncs and returns an error. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// It is the responsibility of the caller to check for non-nil error return. 
+// +// For a plugin to comply with the CNI spec, it must print any error to stdout +// as JSON and then exit with nonzero status code. +// +// To let this package automatically handle errors and call os.Exit(1) for you, +// use PluginMainFuncs() instead. +func PluginMainFuncsWithError(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { return (&dispatcher{ Getenv: os.Getenv, Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, - }).pluginMain(cmdAdd, cmdCheck, cmdDel, versionInfo, about) + }).pluginMain(funcs, versionInfo, about) +} + +// PluginMainFuncs is the core "main" for a plugin which includes automatic error handling. +// This is a newer alternative func to PluginMain which abstracts CNI commands within a +// CNIFuncs interface. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// The caller can specify an "about" string, which is printed on stderr +// when no CNI_COMMAND is specified. The recommended output is "CNI plugin v" +// +// When an error occurs in any func in CNIFuncs, PluginMainFuncs will print the error +// as JSON to stdout and call os.Exit(1). +// +// To have more control over error handling, use PluginMainFuncsWithError() instead. +func PluginMainFuncs(funcs CNIFuncs, versionInfo version.PluginInfo, about string) { + if e := PluginMainFuncsWithError(funcs, versionInfo, about); e != nil { + if err := e.Print(); err != nil { + log.Print("Error writing error JSON to stdout: ", err) + } + os.Exit(1) + } } // PluginMain is the core "main" for a plugin which includes automatic error handling. @@ -284,6 +427,8 @@ func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versio // as JSON to stdout and call os.Exit(1). // // To have more control over error handling, use PluginMainWithError() instead. +// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncs instead. 
func PluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) { if e := PluginMainWithError(cmdAdd, cmdCheck, cmdDel, versionInfo, about); e != nil { if err := e.Print(); err != nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go index 0e1e8b857b..f58b91206d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -26,9 +26,10 @@ import ( convert "github.com/containernetworking/cni/pkg/types/internal" ) -const ImplementedSpecVersion string = "1.0.0" +// The types did not change between v1.0 and v1.1 +const ImplementedSpecVersion string = "1.1.0" -var supportedVersions = []string{ImplementedSpecVersion} +var supportedVersions = []string{"1.0.0", "1.1.0"} // Register converters for all versions less than the implemented spec version func init() { @@ -38,10 +39,14 @@ func init() { convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("1.0.0", []string{"1.1.0"}, convertFrom100) // Down-converters convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.1.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"1.0.0"}, convertFrom100) // Creator convert.RegisterCreator(supportedVersions, NewResult) @@ -90,12 +95,49 @@ type Result struct { DNS types.DNS `json:"dns,omitempty"` } +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to 
write a Marshal function +func (r *Result) MarshalJSON() ([]byte, error) { + // use type alias to escape recursion for json.Marshal() to MarshalJSON() + type fixObjType = Result + + bytes, err := json.Marshal(fixObjType(*r)) //nolint:all + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if r.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) +} + +// convertFrom100 does nothing except set the version; the types are the same +func convertFrom100(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + + result := &Result{ + CNIVersion: toVersion, + Interfaces: fromResult.Interfaces, + IPs: fromResult.IPs, + Routes: fromResult.Routes, + DNS: fromResult.DNS, + } + return result, nil +} + func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { result040, err := convert.Convert(from, "0.4.0") if err != nil { return nil, err } - result100, err := convertFrom04x(result040, ImplementedSpecVersion) + result100, err := convertFrom04x(result040, toVersion) if err != nil { return nil, err } @@ -226,9 +268,12 @@ func (r *Result) PrintTo(writer io.Writer) error { // Interface contains values about the created interfaces type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Mtu int `json:"mtu,omitempty"` + Sandbox string `json:"sandbox,omitempty"` + SocketPath string `json:"socketPath,omitempty"` + PciID string `json:"pciID,omitempty"` } func (i *Interface) String() string { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go index 7516f03ef5..68a602bfdb 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ 
b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -26,8 +26,8 @@ import ( type UnmarshallableBool bool // UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" +// Returns boolean true if the string is "1" or "true" or "True" +// Returns boolean false if the string is "0" or "false" or "False" func (b *UnmarshallableBool) UnmarshalText(data []byte) error { s := strings.ToLower(string(data)) switch s { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go index ed28b33e8e..452cb62201 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -19,6 +19,9 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" + _ "github.com/containernetworking/cni/pkg/types/020" + _ "github.com/containernetworking/cni/pkg/types/040" + _ "github.com/containernetworking/cni/pkg/types/100" convert "github.com/containernetworking/cni/pkg/types/internal" ) diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index fba17dfc0f..f4b3ce3535 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -56,31 +56,72 @@ func (n *IPNet) UnmarshalJSON(data []byte) error { return nil } -// NetConf describes a network. -type NetConf struct { +// Use PluginConf instead of NetConf, the NetConf +// backwards-compat alias will be removed in a future release. +type NetConf = PluginConf + +// PluginConf describes a plugin configuration for a specific network. 
+type PluginConf struct { CNIVersion string `json:"cniVersion,omitempty"` Name string `json:"name,omitempty"` Type string `json:"type,omitempty"` Capabilities map[string]bool `json:"capabilities,omitempty"` IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` + DNS DNS `json:"dns,omitempty"` RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` PrevResult Result `json:"-"` + + // ValidAttachments is only supplied when executing a GC operation + ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"` +} + +// GCAttachment is the parameters to a GC call -- namely, +// the container ID and ifname pair that represents a +// still-valid attachment. +type GCAttachment struct { + ContainerID string `json:"containerID"` + IfName string `json:"ifname"` +} + +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to write a Marshal function +func (n *PluginConf) MarshalJSON() ([]byte, error) { + bytes, err := json.Marshal(*n) + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if n.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) } type IPAM struct { Type string `json:"type,omitempty"` } +// IsEmpty returns true if IPAM structure has no value, otherwise return false +func (i *IPAM) IsEmpty() bool { + return i.Type == "" +} + // NetConfList describes an ordered list of networks. 
type NetConfList struct { CNIVersion string `json:"cniVersion,omitempty"` - Name string `json:"name,omitempty"` - DisableCheck bool `json:"disableCheck,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + DisableGC bool `json:"disableGC,omitempty"` + Plugins []*PluginConf `json:"plugins,omitempty"` } // Result is an interface that provides the result of plugin execution @@ -116,31 +157,48 @@ type DNS struct { Options []string `json:"options,omitempty"` } +// IsEmpty returns true if DNS structure has no value, otherwise return false +func (d *DNS) IsEmpty() bool { + if len(d.Nameservers) == 0 && d.Domain == "" && len(d.Search) == 0 && len(d.Options) == 0 { + return true + } + return false +} + func (d *DNS) Copy() *DNS { if d == nil { return nil } to := &DNS{Domain: d.Domain} - for _, ns := range d.Nameservers { - to.Nameservers = append(to.Nameservers, ns) - } - for _, s := range d.Search { - to.Search = append(to.Search, s) - } - for _, o := range d.Options { - to.Options = append(to.Options, o) - } + to.Nameservers = append(to.Nameservers, d.Nameservers...) + to.Search = append(to.Search, d.Search...) + to.Options = append(to.Options, d.Options...) 
return to } type Route struct { - Dst net.IPNet - GW net.IP + Dst net.IPNet + GW net.IP + MTU int + AdvMSS int + Priority int + Table *int + Scope *int } func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) + table := "" + if r.Table != nil { + table = fmt.Sprintf("%d", *r.Table) + } + + scope := "" + if r.Scope != nil { + scope = fmt.Sprintf("%d", *r.Scope) + } + + return fmt.Sprintf("{Dst:%+v GW:%v MTU:%d AdvMSS:%d Priority:%d Table:%s Scope:%s}", r.Dst, r.GW, r.MTU, r.AdvMSS, r.Priority, table, scope) } func (r *Route) Copy() *Route { @@ -148,14 +206,30 @@ func (r *Route) Copy() *Route { return nil } - return &Route{ - Dst: r.Dst, - GW: r.GW, + route := &Route{ + Dst: r.Dst, + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Scope: r.Scope, + } + + if r.Table != nil { + table := *r.Table + route.Table = &table } + + if r.Scope != nil { + scope := *r.Scope + route.Scope = &scope + } + + return route } // Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +// see https://github.com/containernetworking/cni/blob/main/SPEC.md#well-known-error-codes const ( ErrUnknown uint = iota // 0 ErrIncompatibleCNIVersion // 1 @@ -165,6 +239,7 @@ const ( ErrIOFailure // 5 ErrDecodingFailure // 6 ErrInvalidNetworkConfig // 7 + ErrInvalidNetNS // 8 ErrTryAgainLater uint = 11 ErrInternal uint = 999 ) @@ -200,8 +275,13 @@ func (e *Error) Print() error { // JSON (un)marshallable types type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` + MTU int `json:"mtu,omitempty"` + AdvMSS int `json:"advmss,omitempty"` + Priority int `json:"priority,omitempty"` + Table *int `json:"table,omitempty"` + Scope *int `json:"scope,omitempty"` } func (r *Route) UnmarshalJSON(data []byte) error { @@ -212,13 +292,24 @@ func (r *Route) UnmarshalJSON(data []byte) error { r.Dst = net.IPNet(rt.Dst) r.GW = rt.GW + r.MTU = rt.MTU 
+ r.AdvMSS = rt.AdvMSS + r.Priority = rt.Priority + r.Table = rt.Table + r.Scope = rt.Scope + return nil } func (r Route) MarshalJSON() ([]byte, error) { rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, + Dst: IPNet(r.Dst), + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Table: r.Table, + Scope: r.Scope, } return json.Marshal(rt) diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go index b8ec388745..1981d25569 100644 --- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -36,7 +36,6 @@ var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) // ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters func ValidateContainerID(containerID string) *types.Error { - if containerID == "" { return types.NewError(types.ErrUnknownContainer, "missing containerID", "") } @@ -48,7 +47,6 @@ func ValidateContainerID(containerID string) *types.Error { // ValidateNetworkName will validate that the supplied networkName does not contain invalid characters func ValidateNetworkName(networkName string) *types.Error { - if networkName == "" { return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") } @@ -58,11 +56,11 @@ func ValidateNetworkName(networkName string) *types.Error { return nil } -// ValidateInterfaceName will validate the interface name based on the three rules below +// ValidateInterfaceName will validate the interface name based on the four rules below // 1. The name must not be empty // 2. The name must be less than 16 characters // 3. The name must not be "." or ".." -// 3. The name must not contain / or : or any whitespace characters +// 4. 
The name must not contain / or : or any whitespace characters // ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 func ValidateInterfaceName(ifName string) *types.Error { if len(ifName) == 0 { diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go index 17b22b6b0c..e3bd375bca 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -142,3 +142,27 @@ func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { } return false, nil } + +// GreaterThan returns true if the first version is greater than the second +func GreaterThan(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro > secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go index 1326f8038e..cfb6a12fa3 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -19,13 +19,12 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" - types100 "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/cni/pkg/types/create" ) // Current reports the version of the CNI spec implemented by this library func Current() string { - return types100.ImplementedSpecVersion + return "1.1.0" } // Legacy PluginInfo describes a 
plugin that is backwards compatible with the @@ -35,8 +34,10 @@ func Current() string { // // Any future CNI spec versions which meet this definition should be added to // this list. -var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") +var ( + Legacy = PluginSupports("0.1.0", "0.2.0") + All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0") +) // VersionsFrom returns a list of versions starting from min, inclusive func VersionsStartingFrom(min string) PluginInfo { @@ -62,7 +63,7 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) { // ParsePrevResult parses a prevResult in a NetConf structure and sets // the NetConf's PrevResult member to the parsed Result object. -func ParsePrevResult(conf *types.NetConf) error { +func ParsePrevResult(conf *types.PluginConf) error { if conf.RawPrevResult == nil { return nil } diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go index b4db50b9af..53383de8c7 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go @@ -20,6 +20,8 @@ import ( "time" "github.com/vishvananda/netlink" + + "github.com/containernetworking/plugins/pkg/netlinksafe" ) const SETTLE_INTERVAL = 50 * time.Millisecond @@ -29,15 +31,15 @@ const SETTLE_INTERVAL = 50 * time.Millisecond // There is no easy way to wait for this as an event, so just loop until the // addresses are no longer tentative. // If any addresses are still tentative after timeout seconds, then error. 
-func SettleAddresses(ifName string, timeout int) error { - link, err := netlink.LinkByName(ifName) +func SettleAddresses(ifName string, timeout time.Duration) error { + link, err := netlinksafe.LinkByName(ifName) if err != nil { return fmt.Errorf("failed to retrieve link: %v", err) } - deadline := time.Now().Add(time.Duration(timeout) * time.Second) + deadline := time.Now().Add(timeout) for { - addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + addrs, err := netlinksafe.AddrList(link, netlink.FAMILY_V6) if err != nil { return fmt.Errorf("could not list addresses: %v", err) } @@ -48,7 +50,13 @@ func SettleAddresses(ifName string, timeout int) error { ok := true for _, addr := range addrs { - if addr.Flags&(syscall.IFA_F_TENTATIVE|syscall.IFA_F_DADFAILED) > 0 { + if addr.Flags&(syscall.IFA_F_DADFAILED) != 0 { + return fmt.Errorf("link %s has address %s in DADFAILED state", + ifName, + addr.IP.String()) + } + + if addr.Flags&(syscall.IFA_F_TENTATIVE) != 0 { ok = false break // Break out of the `range addrs`, not the `for` } diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_iptables_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_iptables_linux.go new file mode 100644 index 0000000000..080d4fda62 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_iptables_linux.go @@ -0,0 +1,180 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ip + +import ( + "errors" + "fmt" + "net" + "strings" + + "github.com/coreos/go-iptables/iptables" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/plugins/pkg/utils" +) + +// setupIPMasqIPTables is the iptables-based implementation of SetupIPMasqForNetworks +func setupIPMasqIPTables(ipns []*net.IPNet, network, _, containerID string) error { + // Note: for historical reasons, the iptables implementation ignores ifname. + chain := utils.FormatChainName(network, containerID) + comment := utils.FormatComment(network, containerID) + for _, ip := range ipns { + if err := SetupIPMasq(ip, chain, comment); err != nil { + return err + } + } + return nil +} + +// SetupIPMasq installs iptables rules to masquerade traffic +// coming from ip of ipn and going outside of ipn. +// Deprecated: This function only supports iptables. Use SetupIPMasqForNetworks, which +// supports both iptables and nftables. +func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + var multicastNet string + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + multicastNet = "ff00::/8" + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + multicastNet = "224.0.0.0/4" + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + // Create chain if doesn't exist + exists := false + chains, err := ipt.ListChains("nat") + if err != nil { + return fmt.Errorf("failed to list chains: %v", err) + } + for _, ch := range chains { + if ch == chain { + exists = true + break + } + } + if !exists { + if err = ipt.NewChain("nat", chain); err != nil { + return err + } + } + + // Packets to this network should not be touched + if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Don't masquerade multicast - pods 
should be able to talk to other pods + // on the local network via multicast. + if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil { + return err + } + + // Packets from the specific IP of this network will hit the chain + return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) +} + +// teardownIPMasqIPTables is the iptables-based implementation of TeardownIPMasqForNetworks +func teardownIPMasqIPTables(ipns []*net.IPNet, network, _, containerID string) error { + // Note: for historical reasons, the iptables implementation ignores ifname. + chain := utils.FormatChainName(network, containerID) + comment := utils.FormatComment(network, containerID) + + var errs []string + for _, ipn := range ipns { + err := TeardownIPMasq(ipn, chain, comment) + if err != nil { + errs = append(errs, err.Error()) + } + } + + if errs == nil { + return nil + } + return errors.New(strings.Join(errs, "\n")) +} + +// TeardownIPMasq undoes the effects of SetupIPMasq. +// Deprecated: This function only supports iptables. Use TeardownIPMasqForNetworks, which +// supports both iptables and nftables. 
+func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error { + isV6 := ipn.IP.To4() == nil + + var ipt *iptables.IPTables + var err error + + if isV6 { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + } else { + ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) + } + if err != nil { + return fmt.Errorf("failed to locate iptables: %v", err) + } + + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + // for downward compatibility + err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment) + if err != nil && !isNotExist(err) { + return err + } + + err = ipt.ClearChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + } + + err = ipt.DeleteChain("nat", chain) + if err != nil && !isNotExist(err) { + return err + } + + return nil +} + +// gcIPMasqIPTables is the iptables-based implementation of GCIPMasqForNetwork +func gcIPMasqIPTables(_ string, _ []types.GCAttachment) error { + // FIXME: The iptables implementation does not support GC. + // + // (In theory, it _could_ backward-compatibly support it, by adding a no-op rule + // with a comment indicating the network to each chain it creates, so that it + // could later figure out which chains corresponded to which networks; older + // implementations would ignore the extra rule but would still correctly delete + // the chain on teardown (because they ClearChain() before doing DeleteChain()). + + return nil +} + +// isNotExist returnst true if the error is from iptables indicating +// that the target does not exist. 
+func isNotExist(err error) bool { + e, ok := err.(*iptables.Error) + if !ok { + return false + } + return e.IsNotExist() +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go index aa59a8db54..0063e0a786 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go @@ -15,111 +15,78 @@ package ip import ( + "errors" "fmt" "net" + "strings" - "github.com/coreos/go-iptables/iptables" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/plugins/pkg/utils" ) -// SetupIPMasq installs iptables rules to masquerade traffic -// coming from ip of ipn and going outside of ipn -func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error { - isV6 := ipn.IP.To4() == nil - - var ipt *iptables.IPTables - var err error - var multicastNet string - - if isV6 { - ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - multicastNet = "ff00::/8" - } else { - ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) - multicastNet = "224.0.0.0/4" - } - if err != nil { - return fmt.Errorf("failed to locate iptables: %v", err) - } - - // Create chain if doesn't exist - exists := false - chains, err := ipt.ListChains("nat") - if err != nil { - return fmt.Errorf("failed to list chains: %v", err) - } - for _, ch := range chains { - if ch == chain { - exists = true - break - } - } - if !exists { - if err = ipt.NewChain("nat", chain); err != nil { - return err +// SetupIPMasqForNetworks installs rules to masquerade traffic coming from ips of ipns and +// going outside of ipns, using a chain name based on network, ifname, and containerID. The +// backend can be either "iptables" or "nftables"; if it is nil, then a suitable default +// implementation will be used. 
+func SetupIPMasqForNetworks(backend *string, ipns []*net.IPNet, network, ifname, containerID string) error { + if backend == nil { + // Prefer iptables, unless only nftables is available + defaultBackend := "iptables" + if !utils.SupportsIPTables() && utils.SupportsNFTables() { + defaultBackend = "nftables" } + backend = &defaultBackend } - // Packets to this network should not be touched - if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil { - return err + switch *backend { + case "iptables": + return setupIPMasqIPTables(ipns, network, ifname, containerID) + case "nftables": + return setupIPMasqNFTables(ipns, network, ifname, containerID) + default: + return fmt.Errorf("unknown ipmasq backend %q", *backend) } - - // Don't masquerade multicast - pods should be able to talk to other pods - // on the local network via multicast. - if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil { - return err - } - - // Packets from the specific IP of this network will hit the chain - return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) } -// TeardownIPMasq undoes the effects of SetupIPMasq -func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error { - isV6 := ipn.IP.To4() == nil +// TeardownIPMasqForNetworks undoes the effects of SetupIPMasqForNetworks +func TeardownIPMasqForNetworks(ipns []*net.IPNet, network, ifname, containerID string) error { + var errs []string - var ipt *iptables.IPTables - var err error + // Do both the iptables and the nftables cleanup, since the pod may have been + // created with a different version of this plugin or a different configuration. 
- if isV6 { - ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - } else { - ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4) - } - if err != nil { - return fmt.Errorf("failed to locate iptables: %v", err) + err := teardownIPMasqIPTables(ipns, network, ifname, containerID) + if err != nil && utils.SupportsIPTables() { + errs = append(errs, err.Error()) } - err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment) - if err != nil && !isNotExist(err) { - return err + err = teardownIPMasqNFTables(ipns, network, ifname, containerID) + if err != nil && utils.SupportsNFTables() { + errs = append(errs, err.Error()) } - // for downward compatibility - err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment) - if err != nil && !isNotExist(err) { - return err + if errs == nil { + return nil } + return errors.New(strings.Join(errs, "\n")) +} - err = ipt.ClearChain("nat", chain) - if err != nil && !isNotExist(err) { - return err - } +// GCIPMasqForNetwork garbage collects stale IPMasq entries for network +func GCIPMasqForNetwork(network string, attachments []types.GCAttachment) error { + var errs []string - err = ipt.DeleteChain("nat", chain) - if err != nil && !isNotExist(err) { - return err + err := gcIPMasqIPTables(network, attachments) + if err != nil && utils.SupportsIPTables() { + errs = append(errs, err.Error()) } - return nil -} + err = gcIPMasqNFTables(network, attachments) + if err != nil && utils.SupportsNFTables() { + errs = append(errs, err.Error()) + } -// isNotExist returnst true if the error is from iptables indicating -// that the target does not exist. 
-func isNotExist(err error) bool { - e, ok := err.(*iptables.Error) - if !ok { - return false + if errs == nil { + return nil } - return e.IsNotExist() + return errors.New(strings.Join(errs, "\n")) } diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_nftables_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_nftables_linux.go new file mode 100644 index 0000000000..fd0545eeb5 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_nftables_linux.go @@ -0,0 +1,231 @@ +// Copyright 2023 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "context" + "fmt" + "net" + "strings" + + "sigs.k8s.io/knftables" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/plugins/pkg/utils" +) + +const ( + ipMasqTableName = "cni_plugins_masquerade" + ipMasqChainName = "masq_checks" +) + +// The nftables ipmasq implementation is mostly like the iptables implementation, with +// minor updates to fix a bug (adding `ifname`) and to allow future GC support. +// +// We add a rule for each mapping, with a comment containing a hash of its identifiers, +// so that we can later reliably delete the rules we want. 
(This is important because in +// edge cases, it's possible the plugin might see "ADD container A with IP 192.168.1.3", +// followed by "ADD container B with IP 192.168.1.3" followed by "DEL container A with IP +// 192.168.1.3", and we need to make sure that the DEL causes us to delete the rule for +// container A, and not the rule for container B.) +// +// It would be more nftables-y to have a chain with a single rule doing a lookup against a +// set with an element per mapping, rather than having a chain with a rule per mapping. +// But there's no easy, non-racy way to say "delete the element 192.168.1.3 from the set, +// but only if it was added for container A, not if it was added for container B". + +// hashForNetwork returns a unique hash for this network +func hashForNetwork(network string) string { + return utils.MustFormatHashWithPrefix(16, "", network) +} + +// hashForInstance returns a unique hash identifying the rules for this +// network/ifname/containerID +func hashForInstance(network, ifname, containerID string) string { + return hashForNetwork(network) + "-" + utils.MustFormatHashWithPrefix(16, "", ifname+":"+containerID) +} + +// commentForInstance returns a comment string that begins with a unique hash and +// ends with a (possibly-truncated) human-readable description. 
+func commentForInstance(network, ifname, containerID string) string { + comment := fmt.Sprintf("%s, net: %s, if: %s, id: %s", + hashForInstance(network, ifname, containerID), + strings.ReplaceAll(network, `"`, ``), + strings.ReplaceAll(ifname, `"`, ``), + strings.ReplaceAll(containerID, `"`, ``), + ) + if len(comment) > knftables.CommentLengthMax { + comment = comment[:knftables.CommentLengthMax] + } + return comment +} + +// setupIPMasqNFTables is the nftables-based implementation of SetupIPMasqForNetworks +func setupIPMasqNFTables(ipns []*net.IPNet, network, ifname, containerID string) error { + nft, err := knftables.New(knftables.InetFamily, ipMasqTableName) + if err != nil { + return err + } + return setupIPMasqNFTablesWithInterface(nft, ipns, network, ifname, containerID) +} + +func setupIPMasqNFTablesWithInterface(nft knftables.Interface, ipns []*net.IPNet, network, ifname, containerID string) error { + staleRules, err := findRules(nft, hashForInstance(network, ifname, containerID)) + if err != nil { + return err + } + + tx := nft.NewTransaction() + + // Ensure that our table and chains exist. + tx.Add(&knftables.Table{ + Comment: knftables.PtrTo("Masquerading for plugins from github.com/containernetworking/plugins"), + }) + tx.Add(&knftables.Chain{ + Name: ipMasqChainName, + Comment: knftables.PtrTo("Masquerade traffic from certain IPs to any (non-multicast) IP outside their subnet"), + }) + + // Ensure that the postrouting chain exists and has the correct rules. (Has to be + // done after creating ipMasqChainName, so we can jump to it.) 
+ tx.Add(&knftables.Chain{ + Name: "postrouting", + Type: knftables.PtrTo(knftables.NATType), + Hook: knftables.PtrTo(knftables.PostroutingHook), + Priority: knftables.PtrTo(knftables.SNATPriority), + }) + tx.Flush(&knftables.Chain{ + Name: "postrouting", + }) + tx.Add(&knftables.Rule{ + Chain: "postrouting", + Rule: "ip daddr == 224.0.0.0/4 return", + }) + tx.Add(&knftables.Rule{ + Chain: "postrouting", + Rule: "ip6 daddr == ff00::/8 return", + }) + tx.Add(&knftables.Rule{ + Chain: "postrouting", + Rule: knftables.Concat( + "goto", ipMasqChainName, + ), + }) + + // Delete stale rules, add new rules to masquerade chain + for _, rule := range staleRules { + tx.Delete(rule) + } + for _, ipn := range ipns { + ip := "ip" + if ipn.IP.To4() == nil { + ip = "ip6" + } + + // e.g. if ipn is "192.168.1.4/24", then dstNet is "192.168.1.0/24" + dstNet := &net.IPNet{IP: ipn.IP.Mask(ipn.Mask), Mask: ipn.Mask} + + tx.Add(&knftables.Rule{ + Chain: ipMasqChainName, + Rule: knftables.Concat( + ip, "saddr", "==", ipn.IP, + ip, "daddr", "!=", dstNet, + "masquerade", + ), + Comment: knftables.PtrTo(commentForInstance(network, ifname, containerID)), + }) + } + + return nft.Run(context.TODO(), tx) +} + +// teardownIPMasqNFTables is the nftables-based implementation of TeardownIPMasqForNetworks +func teardownIPMasqNFTables(ipns []*net.IPNet, network, ifname, containerID string) error { + nft, err := knftables.New(knftables.InetFamily, ipMasqTableName) + if err != nil { + return err + } + return teardownIPMasqNFTablesWithInterface(nft, ipns, network, ifname, containerID) +} + +func teardownIPMasqNFTablesWithInterface(nft knftables.Interface, _ []*net.IPNet, network, ifname, containerID string) error { + rules, err := findRules(nft, hashForInstance(network, ifname, containerID)) + if err != nil { + return err + } else if len(rules) == 0 { + return nil + } + + tx := nft.NewTransaction() + for _, rule := range rules { + tx.Delete(rule) + } + return nft.Run(context.TODO(), tx) +} + +// 
gcIPMasqNFTables is the nftables-based implementation of GCIPMasqForNetwork +func gcIPMasqNFTables(network string, attachments []types.GCAttachment) error { + nft, err := knftables.New(knftables.InetFamily, ipMasqTableName) + if err != nil { + return err + } + return gcIPMasqNFTablesWithInterface(nft, network, attachments) +} + +func gcIPMasqNFTablesWithInterface(nft knftables.Interface, network string, attachments []types.GCAttachment) error { + // Find all rules for the network + rules, err := findRules(nft, hashForNetwork(network)) + if err != nil { + return err + } else if len(rules) == 0 { + return nil + } + + // Compute the comments for all elements of attachments + validAttachments := map[string]bool{} + for _, attachment := range attachments { + validAttachments[commentForInstance(network, attachment.IfName, attachment.ContainerID)] = true + } + + // Delete anything in rules that isn't in validAttachments + tx := nft.NewTransaction() + for _, rule := range rules { + if !validAttachments[*rule.Comment] { + tx.Delete(rule) + } + } + return nft.Run(context.TODO(), tx) +} + +// findRules finds rules with comments that start with commentPrefix. 
+func findRules(nft knftables.Interface, commentPrefix string) ([]*knftables.Rule, error) { + rules, err := nft.ListRules(context.TODO(), ipMasqChainName) + if err != nil { + if knftables.IsNotFound(err) { + // If ipMasqChainName doesn't exist yet, that's fine + return nil, nil + } + return nil, err + } + + matchingRules := make([]*knftables.Rule, 0, 1) + for _, rule := range rules { + if rule.Comment != nil && strings.HasPrefix(*rule.Comment, commentPrefix) { + matchingRules = append(matchingRules, rule) + } + } + + return matchingRules, nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go index 07adea1925..8f677bf369 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go @@ -24,6 +24,7 @@ import ( "github.com/safchain/ethtool" "github.com/vishvananda/netlink" + "github.com/containernetworking/plugins/pkg/netlinksafe" "github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/utils/sysctl" ) @@ -32,11 +33,12 @@ var ErrLinkNotFound = errors.New("link not found") // makeVethPair is called from within the container's network namespace func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netlink.Link, error) { + linkAttrs := netlink.NewLinkAttrs() + linkAttrs.Name = name + linkAttrs.MTU = mtu + veth := &netlink.Veth{ - LinkAttrs: netlink.LinkAttrs{ - Name: name, - MTU: mtu, - }, + LinkAttrs: linkAttrs, PeerName: peer, PeerNamespace: netlink.NsFd(int(hostNS.Fd())), } @@ -51,7 +53,7 @@ func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netl return nil, err } // Re-fetch the container link to get its creation-time parameters, e.g. 
index and mac - veth2, err := netlink.LinkByName(name) + veth2, err := netlinksafe.LinkByName(name) if err != nil { netlink.LinkDel(veth) // try and clean up the link if possible. return nil, err @@ -61,7 +63,7 @@ func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netl } func peerExists(name string) bool { - if _, err := netlink.LinkByName(name); err != nil { + if _, err := netlinksafe.LinkByName(name); err != nil { return false } return true @@ -113,7 +115,7 @@ func RandomVethName() (string, error) { } func RenameLink(curName, newName string) error { - link, err := netlink.LinkByName(curName) + link, err := netlinksafe.LinkByName(curName) if err == nil { err = netlink.LinkSetName(link, newName) } @@ -144,7 +146,7 @@ func SetupVethWithName(contVethName, hostVethName string, mtu int, contVethMac s var hostVeth netlink.Link err = hostNS.Do(func(_ ns.NetNS) error { - hostVeth, err = netlink.LinkByName(hostVethName) + hostVeth, err = netlinksafe.LinkByName(hostVethName) if err != nil { return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err) } @@ -173,7 +175,7 @@ func SetupVeth(contVethName string, mtu int, contVethMac string, hostNS ns.NetNS // DelLinkByName removes an interface link. 
func DelLinkByName(ifName string) error { - iface, err := netlink.LinkByName(ifName) + iface, err := netlinksafe.LinkByName(ifName) if err != nil { if _, ok := err.(netlink.LinkNotFoundError); ok { return ErrLinkNotFound @@ -190,7 +192,7 @@ func DelLinkByName(ifName string) error { // DelLinkByNameAddr remove an interface and returns its addresses func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { - iface, err := netlink.LinkByName(ifName) + iface, err := netlinksafe.LinkByName(ifName) if err != nil { if _, ok := err.(netlink.LinkNotFoundError); ok { return nil, ErrLinkNotFound @@ -198,7 +200,7 @@ func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err) } - addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL) + addrs, err := netlinksafe.AddrList(iface, netlink.FAMILY_ALL) if err != nil { return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err) } @@ -221,7 +223,7 @@ func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { // veth, or an error. This peer ifindex will only be valid in the peer's // network namespace. 
func GetVethPeerIfindex(ifName string) (netlink.Link, int, error) { - link, err := netlink.LinkByName(ifName) + link, err := netlinksafe.LinkByName(ifName) if err != nil { return nil, -1, fmt.Errorf("could not look up %q: %v", ifName, err) } diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go index e92b6c53e4..4072898aa8 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go @@ -50,3 +50,16 @@ func AddDefaultRoute(gw net.IP, dev netlink.Link) error { } return AddRoute(defNet, gw, dev) } + +// IsIPNetZero check if the IPNet is "0.0.0.0/0" or "::/0" +// This is needed as go-netlink replaces nil Dst with a '0' IPNet since +// https://github.com/vishvananda/netlink/commit/acdc658b8613655ddb69f978e9fb4cf413e2b830 +func IsIPNetZero(ipnet *net.IPNet) bool { + if ipnet == nil { + return true + } + if ones, _ := ipnet.Mask.Size(); ones != 0 { + return false + } + return ipnet.IP.Equal(net.IPv4zero) || ipnet.IP.Equal(net.IPv6zero) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go index 12a6175b42..2926def923 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containernetworking/cni/pkg/types" current "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/plugins/pkg/netlinksafe" ) func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) error { @@ -33,12 +34,12 @@ func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) ourAddr := netlink.Addr{IPNet: &ips.Address} match := false - link, err := netlink.LinkByName(ifName) + link, err := 
netlinksafe.LinkByName(ifName) if err != nil { return fmt.Errorf("Cannot find container link %v", ifName) } - addrList, err := netlink.AddrList(link, netlink.FAMILY_ALL) + addrList, err := netlinksafe.AddrList(link, netlink.FAMILY_ALL) if err != nil { return fmt.Errorf("Cannot obtain List of IP Addresses") } @@ -67,7 +68,7 @@ func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) family = netlink.FAMILY_V4 } - gwy, err := netlink.RouteListFiltered(family, findGwy, routeFilter) + gwy, err := netlinksafe.RouteListFiltered(family, findGwy, routeFilter) if err != nil { return fmt.Errorf("Error %v trying to find Gateway %v for interface %v", err, ips.Gateway, ifName) } @@ -108,7 +109,7 @@ func ValidateExpectedRoute(resultRoutes []*types.Route) error { return fmt.Errorf("Invalid static route found %v", route) } - wasFound, err := netlink.RouteListFiltered(family, find, routeFilter) + wasFound, err := netlinksafe.RouteListFiltered(family, find, routeFilter) if err != nil { return fmt.Errorf("Expected Route %v not route table lookup error %v", route, err) } diff --git a/vendor/github.com/containernetworking/plugins/pkg/netlinksafe/netlink.go b/vendor/github.com/containernetworking/plugins/pkg/netlinksafe/netlink.go new file mode 100644 index 0000000000..0f7f45b6d2 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/netlinksafe/netlink.go @@ -0,0 +1,321 @@ +// Package netlinksafe wraps vishvandanda/netlink functions that may return EINTR. +// +// A Handle instantiated using [NewHandle] or [NewHandleAt] can be used in place +// of a netlink.Handle, it's a wrapper that replaces methods that need to be +// wrapped. Functions that use the package handle need to be called as "netlinksafe.X" +// instead of "netlink.X". +// +// The wrapped functions currently return EINTR when NLM_F_DUMP_INTR flagged +// in a netlink response, meaning something changed during the dump so results +// may be incomplete or inconsistent. 
+// +// At present, the possibly incomplete/inconsistent results are not returned +// by netlink functions along with the EINTR. So, it's not possible to do +// anything but retry. After maxAttempts the EINTR will be returned to the +// caller. +package netlinksafe + +import ( + "log" + + "github.com/pkg/errors" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +// Arbitrary limit on max attempts at netlink calls if they are repeatedly interrupted. +const maxAttempts = 5 + +type Handle struct { + *netlink.Handle +} + +func NewHandle(nlFamilies ...int) (Handle, error) { + nlh, err := netlink.NewHandle(nlFamilies...) + if err != nil { + return Handle{}, err + } + return Handle{nlh}, nil +} + +func NewHandleAt(ns netns.NsHandle, nlFamilies ...int) (Handle, error) { + nlh, err := netlink.NewHandleAt(ns, nlFamilies...) + if err != nil { + return Handle{}, err + } + return Handle{nlh}, nil +} + +func (h Handle) Close() { + if h.Handle != nil { + h.Handle.Close() + } +} + +func retryOnIntr(f func() error) { + for attempt := 0; attempt < maxAttempts; attempt++ { + if err := f(); !errors.Is(err, netlink.ErrDumpInterrupted) { + return + } + } + log.Printf("netlink call interrupted after %d attempts", maxAttempts) +} + +func discardErrDumpInterrupted(err error) error { + if errors.Is(err, netlink.ErrDumpInterrupted) { + // The netlink function has returned possibly-inconsistent data along with the + // error. Discard the error and return the data. This restores the behaviour of + // the netlink package prior to v1.2.1, in which NLM_F_DUMP_INTR was ignored in + // the netlink response. + log.Printf("discarding ErrDumpInterrupted: %+v", errors.WithStack(err)) + return nil + } + return err +} + +// AddrList calls netlink.AddrList, retrying if necessary. 
+func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { + var addrs []netlink.Addr + var err error + retryOnIntr(func() error { + addrs, err = netlink.AddrList(link, family) //nolint:forbidigo + return err + }) + return addrs, discardErrDumpInterrupted(err) +} + +// LinkByName calls h.Handle.LinkByName, retrying if necessary. The netlink function +// doesn't normally ask the kernel for a dump of links. But, on an old kernel, it +// will do as a fallback and that dump may get inconsistent results. +func (h Handle) LinkByName(name string) (netlink.Link, error) { + var link netlink.Link + var err error + retryOnIntr(func() error { + link, err = h.Handle.LinkByName(name) //nolint:forbidigo + return err + }) + return link, discardErrDumpInterrupted(err) +} + +// LinkByName calls netlink.LinkByName, retrying if necessary. The netlink +// function doesn't normally ask the kernel for a dump of links. But, on an old +// kernel, it will do as a fallback and that dump may get inconsistent results. +func LinkByName(name string) (netlink.Link, error) { + var link netlink.Link + var err error + retryOnIntr(func() error { + link, err = netlink.LinkByName(name) //nolint:forbidigo + return err + }) + return link, discardErrDumpInterrupted(err) +} + +// LinkList calls h.Handle.LinkList, retrying if necessary. +func (h Handle) LinkList() ([]netlink.Link, error) { + var links []netlink.Link + var err error + retryOnIntr(func() error { + links, err = h.Handle.LinkList() //nolint:forbidigo + return err + }) + return links, discardErrDumpInterrupted(err) +} + +// LinkList calls netlink.Handle.LinkList, retrying if necessary. +func LinkList() ([]netlink.Link, error) { + var links []netlink.Link + var err error + retryOnIntr(func() error { + links, err = netlink.LinkList() //nolint:forbidigo + return err + }) + return links, discardErrDumpInterrupted(err) +} + +// RouteList calls h.Handle.RouteList, retrying if necessary. 
+func (h Handle) RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + var routes []netlink.Route + var err error + retryOnIntr(func() error { + routes, err = h.Handle.RouteList(link, family) //nolint:forbidigo + return err + }) + return routes, err +} + +// RouteList calls netlink.RouteList, retrying if necessary. +func RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + var route []netlink.Route + var err error + retryOnIntr(func() error { + route, err = netlink.RouteList(link, family) //nolint:forbidigo + return err + }) + return route, discardErrDumpInterrupted(err) +} + +// BridgeVlanList calls netlink.BridgeVlanList, retrying if necessary. +func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { + var err error + var info map[int32][]*nl.BridgeVlanInfo + retryOnIntr(func() error { + info, err = netlink.BridgeVlanList() //nolint:forbidigo + return err + }) + return info, discardErrDumpInterrupted(err) +} + +// RouteListFiltered calls h.Handle.RouteListFiltered, retrying if necessary. +func (h Handle) RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + var routes []netlink.Route + var err error + retryOnIntr(func() error { + routes, err = h.Handle.RouteListFiltered(family, filter, filterMask) //nolint:forbidigo + return err + }) + return routes, err +} + +// RouteListFiltered calls netlink.RouteListFiltered, retrying if necessary. +func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + var route []netlink.Route + var err error + retryOnIntr(func() error { + route, err = netlink.RouteListFiltered(family, filter, filterMask) //nolint:forbidigo + return err + }) + return route, discardErrDumpInterrupted(err) +} + +// QdiscList calls netlink.QdiscList, retrying if necessary. 
+func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + var qdisc []netlink.Qdisc + var err error + retryOnIntr(func() error { + qdisc, err = netlink.QdiscList(link) //nolint:forbidigo + return err + }) + return qdisc, discardErrDumpInterrupted(err) +} + +// QdiscList calls h.Handle.QdiscList, retrying if necessary. +func (h *Handle) QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + var qdisc []netlink.Qdisc + var err error + retryOnIntr(func() error { + qdisc, err = h.Handle.QdiscList(link) //nolint:forbidigo + return err + }) + return qdisc, err +} + +// LinkGetProtinfo calls netlink.LinkGetProtinfo, retrying if necessary. +func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + var protinfo netlink.Protinfo + var err error + retryOnIntr(func() error { + protinfo, err = netlink.LinkGetProtinfo(link) //nolint:forbidigo + return err + }) + return protinfo, discardErrDumpInterrupted(err) +} + +// LinkGetProtinfo calls h.Handle.LinkGetProtinfo, retrying if necessary. +func (h *Handle) LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + var protinfo netlink.Protinfo + var err error + retryOnIntr(func() error { + protinfo, err = h.Handle.LinkGetProtinfo(link) //nolint:forbidigo + return err + }) + return protinfo, err +} + +// RuleListFiltered calls netlink.RuleListFiltered, retrying if necessary. +func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + var rules []netlink.Rule + var err error + retryOnIntr(func() error { + rules, err = netlink.RuleListFiltered(family, filter, filterMask) //nolint:forbidigo + return err + }) + return rules, discardErrDumpInterrupted(err) +} + +// RuleListFiltered calls h.Handle.RuleListFiltered, retrying if necessary. 
+func (h *Handle) RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + var rules []netlink.Rule + var err error + retryOnIntr(func() error { + rules, err = h.Handle.RuleListFiltered(family, filter, filterMask) //nolint:forbidigo + return err + }) + return rules, err +} + +// FilterList calls netlink.FilterList, retrying if necessary. +func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + var filters []netlink.Filter + var err error + retryOnIntr(func() error { + filters, err = netlink.FilterList(link, parent) //nolint:forbidigo + return err + }) + return filters, discardErrDumpInterrupted(err) +} + +// FilterList calls h.Handle.FilterList, retrying if necessary. +func (h *Handle) FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + var filters []netlink.Filter + var err error + retryOnIntr(func() error { + filters, err = h.Handle.FilterList(link, parent) //nolint:forbidigo + return err + }) + return filters, err +} + +// RuleList calls netlink.RuleList, retrying if necessary. +func RuleList(family int) ([]netlink.Rule, error) { + var rules []netlink.Rule + var err error + retryOnIntr(func() error { + rules, err = netlink.RuleList(family) //nolint:forbidigo + return err + }) + return rules, discardErrDumpInterrupted(err) +} + +// RuleList calls h.Handle.RuleList, retrying if necessary. +func (h *Handle) RuleList(family int) ([]netlink.Rule, error) { + var rules []netlink.Rule + var err error + retryOnIntr(func() error { + rules, err = h.Handle.RuleList(family) //nolint:forbidigo + return err + }) + return rules, err +} + +// ConntrackDeleteFilters calls netlink.ConntrackDeleteFilters, retrying if necessary. 
+func ConntrackDeleteFilters(table netlink.ConntrackTableType, family netlink.InetFamily, filters ...netlink.CustomConntrackFilter) (uint, error) { + var deleted uint + var err error + retryOnIntr(func() error { + deleted, err = netlink.ConntrackDeleteFilters(table, family, filters...) //nolint:forbidigo + return err + }) + return deleted, discardErrDumpInterrupted(err) +} + +// ConntrackDeleteFilters calls h.Handle.ConntrackDeleteFilters, retrying if necessary. +func (h *Handle) ConntrackDeleteFilters(table netlink.ConntrackTableType, family netlink.InetFamily, filters ...netlink.CustomConntrackFilter) (uint, error) { + var deleted uint + var err error + retryOnIntr(func() error { + deleted, err = h.Handle.ConntrackDeleteFilters(table, family, filters...) //nolint:forbidigo + return err + }) + return deleted, err +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md index 1e265c7a01..e5fef2db70 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -13,10 +13,10 @@ The `ns.Do()` method provides **partial** control over network namespaces for yo ```go err = targetNs.Do(func(hostNs ns.NetNS) error { + linkAttrs := netlink.NewLinkAttrs() + linkAttrs.Name = "dummy0" dummy := &netlink.Dummy{ - LinkAttrs: netlink.LinkAttrs{ - Name: "dummy0", - }, + LinkAttrs: linkAttrs, } return netlink.LinkAdd(dummy) }) diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go index f260f28132..5a6aaa3339 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -31,6 +31,10 @@ func GetCurrentNS() (NetNS, error) { // return an unexpected network namespace. 
runtime.LockOSThread() defer runtime.UnlockOSThread() + return getCurrentNSNoLock() +} + +func getCurrentNSNoLock() (NetNS, error) { return GetNS(getCurrentThreadNetNSPath()) } @@ -152,6 +156,54 @@ func GetNS(nspath string) (NetNS, error) { return &netNS{file: fd}, nil } +// Returns a new empty NetNS. +// Calling Close() let the kernel garbage collect the network namespace. +func TempNetNS() (NetNS, error) { + var tempNS NetNS + var err error + var wg sync.WaitGroup + wg.Add(1) + + // Create the new namespace in a new goroutine so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. + go func() { + defer wg.Done() + runtime.LockOSThread() + + var threadNS NetNS + // save a handle to current network namespace + threadNS, err = getCurrentNSNoLock() + if err != nil { + err = fmt.Errorf("failed to open current namespace: %v", err) + return + } + defer threadNS.Close() + + // create the temporary network namespace + err = unix.Unshare(unix.CLONE_NEWNET) + if err != nil { + return + } + + // get a handle to the temporary network namespace + tempNS, err = getCurrentNSNoLock() + + err2 := threadNS.Set() + if err2 == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. 
+ runtime.UnlockOSThread() + } + }() + + wg.Wait() + return tempNS, err +} + func (ns *netNS) Path() string { return ns.file.Name() } @@ -173,7 +225,7 @@ func (ns *netNS) Do(toRun func(NetNS) error) error { } containedCall := func(hostNS NetNS) error { - threadNS, err := GetCurrentNS() + threadNS, err := getCurrentNSNoLock() if err != nil { return fmt.Errorf("failed to open current netns: %v", err) } diff --git a/vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go b/vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go index 6f65d6ddd3..276f9e5a6d 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go +++ b/vendor/github.com/containernetworking/plugins/pkg/testutils/cmd.go @@ -114,3 +114,12 @@ func CmdDel(cniNetns, cniContainerID, cniIfname string, f func() error) error { func CmdDelWithArgs(args *skel.CmdArgs, f func() error) error { return CmdDel(args.Netns, args.ContainerID, args.IfName, f) } + +func CmdStatus(f func() error) error { + os.Setenv("CNI_COMMAND", "STATUS") + os.Setenv("CNI_PATH", os.Getenv("PATH")) + os.Setenv("CNI_NETNS_OVERRIDE", "1") + defer envCleanup() + + return f() +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go b/vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go index 9444a8b2d6..9f5140fc62 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go +++ b/vendor/github.com/containernetworking/plugins/pkg/testutils/testing.go @@ -19,7 +19,7 @@ import ( ) // AllSpecVersions contains all CNI spec version numbers -var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0"} +var AllSpecVersions = [...]string{"0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0"} // SpecVersionHasIPVersion returns true if the given CNI specification version // includes the "version" field in the IP address elements @@ -39,6 +39,13 @@ func SpecVersionHasCHECK(ver string) bool { return ok } +// 
SpecVersionHasSTATUS returns true if the given CNI specification version +// supports the STATUS command +func SpecVersionHasSTATUS(ver string) bool { + ok, _ := version.GreaterThanOrEqualTo(ver, "1.1.0") + return ok +} + // SpecVersionHasChaining returns true if the given CNI specification version // supports plugin chaining func SpecVersionHasChaining(ver string) bool { diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/conntrack.go b/vendor/github.com/containernetworking/plugins/pkg/utils/conntrack.go new file mode 100644 index 0000000000..f4cc2627ce --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/conntrack.go @@ -0,0 +1,75 @@ +// Copyright 2020 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "fmt" + "net" + + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + + "github.com/containernetworking/plugins/pkg/netlinksafe" +) + +// Assigned Internet Protocol Numbers +// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +const ( + PROTOCOL_TCP = 6 + PROTOCOL_UDP = 17 + PROTOCOL_SCTP = 132 +) + +// getNetlinkFamily returns the Netlink IP family constant +func getNetlinkFamily(isIPv6 bool) netlink.InetFamily { + if isIPv6 { + return unix.AF_INET6 + } + return unix.AF_INET +} + +// DeleteConntrackEntriesForDstIP delete the conntrack entries for the connections +// specified by the given destination IP and protocol +func DeleteConntrackEntriesForDstIP(dstIP string, protocol uint8) error { + ip := net.ParseIP(dstIP) + if ip == nil { + return fmt.Errorf("error deleting connection tracking state, bad IP %s", ip) + } + family := getNetlinkFamily(ip.To4() == nil) + + filter := &netlink.ConntrackFilter{} + filter.AddIP(netlink.ConntrackOrigDstIP, ip) + filter.AddProtocol(protocol) + + _, err := netlinksafe.ConntrackDeleteFilters(netlink.ConntrackTable, family, filter) + if err != nil { + return fmt.Errorf("error deleting connection tracking state for protocol: %d IP: %s, error: %v", protocol, ip, err) + } + return nil +} + +// DeleteConntrackEntriesForDstPort delete the conntrack entries for the connections specified +// by the given destination port, protocol and IP family +func DeleteConntrackEntriesForDstPort(port uint16, protocol uint8, family netlink.InetFamily) error { + filter := &netlink.ConntrackFilter{} + filter.AddProtocol(protocol) + filter.AddPort(netlink.ConntrackOrigDstPort, port) + + _, err := netlinksafe.ConntrackDeleteFilters(netlink.ConntrackTable, family, filter) + if err != nil { + return fmt.Errorf("error deleting connection tracking state for protocol: %d Port: %d, error: %v", protocol, port, err) + } + return nil +} diff --git 
a/vendor/github.com/containernetworking/plugins/pkg/utils/iptables.go b/vendor/github.com/containernetworking/plugins/pkg/utils/iptables.go new file mode 100644 index 0000000000..b83e6d26c3 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/iptables.go @@ -0,0 +1,120 @@ +// Copyright 2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "errors" + "fmt" + + "github.com/coreos/go-iptables/iptables" +) + +const statusChainExists = 1 + +// EnsureChain idempotently creates the iptables chain. It does not +// return an error if the chain already exists. +func EnsureChain(ipt *iptables.IPTables, table, chain string) error { + if ipt == nil { + return errors.New("failed to ensure iptable chain: IPTables was nil") + } + exists, err := ipt.ChainExists(table, chain) + if err != nil { + return fmt.Errorf("failed to check iptables chain existence: %v", err) + } + if !exists { + err = ipt.NewChain(table, chain) + if err != nil { + eerr, eok := err.(*iptables.Error) + if eok && eerr.ExitStatus() != statusChainExists { + return err + } + } + } + return nil +} + +// DeleteRule idempotently delete the iptables rule in the specified table/chain. 
+// It does not return an error if the referring chain doesn't exist +func DeleteRule(ipt *iptables.IPTables, table, chain string, rulespec ...string) error { + if ipt == nil { + return errors.New("failed to ensure iptable chain: IPTables was nil") + } + if err := ipt.Delete(table, chain, rulespec...); err != nil { + eerr, eok := err.(*iptables.Error) + switch { + case eok && eerr.IsNotExist(): + // swallow here, the chain was already deleted + return nil + case eok && eerr.ExitStatus() == 2: + // swallow here, invalid command line parameter because the referring rule is missing + return nil + default: + return fmt.Errorf("Failed to delete referring rule %s %s: %v", table, chain, err) + } + } + return nil +} + +// DeleteChain idempotently deletes the specified table/chain. +// It does not return an errors if the chain does not exist +func DeleteChain(ipt *iptables.IPTables, table, chain string) error { + if ipt == nil { + return errors.New("failed to ensure iptable chain: IPTables was nil") + } + + err := ipt.DeleteChain(table, chain) + eerr, eok := err.(*iptables.Error) + switch { + case eok && eerr.IsNotExist(): + // swallow here, the chain was already deleted + return nil + default: + return err + } +} + +// ClearChain idempotently clear the iptables rules in the specified table/chain. +// If the chain does not exist, a new one will be created +func ClearChain(ipt *iptables.IPTables, table, chain string) error { + if ipt == nil { + return errors.New("failed to ensure iptable chain: IPTables was nil") + } + err := ipt.ClearChain(table, chain) + eerr, eok := err.(*iptables.Error) + switch { + case eok && eerr.IsNotExist(): + // swallow here, the chain was already deleted + return EnsureChain(ipt, table, chain) + default: + return err + } +} + +// InsertUnique will add a rule to a chain if it does not already exist. +// By default the rule is appended, unless prepend is true. 
+func InsertUnique(ipt *iptables.IPTables, table, chain string, prepend bool, rule []string) error { + exists, err := ipt.Exists(table, chain, rule...) + if err != nil { + return err + } + if exists { + return nil + } + + if prepend { + return ipt.Insert(table, chain, 1, rule...) + } + return ipt.Append(table, chain, rule...) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/netfilter.go b/vendor/github.com/containernetworking/plugins/pkg/utils/netfilter.go new file mode 100644 index 0000000000..1fa391404d --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/netfilter.go @@ -0,0 +1,46 @@ +// Copyright 2023 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "github.com/coreos/go-iptables/iptables" + "sigs.k8s.io/knftables" +) + +// SupportsIPTables tests whether the system supports using netfilter via the iptables API +// (whether via "iptables-legacy" or "iptables-nft"). (Note that this returns true if it +// is *possible* to use iptables; it does not test whether any other components on the +// system are *actually* using iptables.) +func SupportsIPTables() bool { + ipt, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + return false + } + // We don't care whether the chain actually exists, only whether we can *check* + // whether it exists. 
+ _, err = ipt.ChainExists("filter", "INPUT") + return err == nil +} + +// SupportsNFTables tests whether the system supports using netfilter via the nftables API +// (ie, not via "iptables-nft"). (Note that this returns true if it is *possible* to use +// nftables; it does not test whether any other components on the system are *actually* +// using nftables.) +func SupportsNFTables() bool { + // knftables.New() does sanity checks so we don't need any further test like in + // the iptables case. + _, err := knftables.New(knftables.IPv4Family, "supports_nftables_test") + return err == nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/utils.go b/vendor/github.com/containernetworking/plugins/pkg/utils/utils.go new file mode 100644 index 0000000000..d4fb011cb5 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/utils/utils.go @@ -0,0 +1,60 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "crypto/sha512" + "fmt" +) + +const ( + maxChainLength = 28 + chainPrefix = "CNI-" +) + +// FormatChainName generates a chain name to be used +// with iptables. Ensures that the generated chain +// name is exactly maxChainLength chars in length. 
+func FormatChainName(name string, id string) string { + return MustFormatChainNameWithPrefix(name, id, "") +} + +// MustFormatChainNameWithPrefix generates a chain name similar +// to FormatChainName, but adds a custom prefix between +// chainPrefix and unique identifier. Ensures that the +// generated chain name is exactly maxChainLength chars in length. +// Panics if the given prefix is too long. +func MustFormatChainNameWithPrefix(name string, id string, prefix string) string { + return MustFormatHashWithPrefix(maxChainLength, chainPrefix+prefix, name+id) +} + +// FormatComment returns a comment used for easier +// rule identification within iptables. +func FormatComment(name string, id string) string { + return fmt.Sprintf("name: %q id: %q", name, id) +} + +const MaxHashLen = sha512.Size * 2 + +// MustFormatHashWithPrefix returns a string of given length that begins with the +// given prefix. It is filled with entropy based on the given string toHash. +func MustFormatHashWithPrefix(length int, prefix string, toHash string) string { + if len(prefix) >= length || length > MaxHashLen { + panic("invalid length") + } + + output := sha512.Sum512([]byte(toHash)) + return fmt.Sprintf("%s%x", prefix, output)[:length] +} diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go index e95929c921..b0589959b3 100644 --- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -45,15 +45,21 @@ func (e *Error) Error() string { return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) } +var isNotExistPatterns = []string{ + "Bad rule (does a matching rule exist in that chain?).\n", + "No chain/target/match by that name.\n", + "No such file or directory", + "does not exist", +} + // IsNotExist returns true if the error is due to the chain or rule not existing func (e *Error) IsNotExist() bool { - if 
e.ExitStatus() != 1 { - return false + for _, str := range isNotExistPatterns { + if strings.Contains(e.msg, str) { + return true + } } - msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" - msgNoChainExist := "No chain/target/match by that name.\n" - msgENOENT := "No such file or directory" - return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) || strings.Contains(e.msg, msgENOENT) + return false } // Protocol to differentiate between IPv4 and IPv6 @@ -106,8 +112,20 @@ func Timeout(timeout int) option { } } -// New creates a new IPTables configured with the options passed as parameter. -// For backwards compatibility, by default always uses IPv4 and timeout 0. +func Path(path string) option { + return func(ipt *IPTables) { + ipt.path = path + } +} + +// New creates a new IPTables configured with the options passed as parameters. +// Supported parameters are: +// +// IPFamily(Protocol) +// Timeout(int) +// Path(string) +// +// For backwards compatibility, by default New uses IPv4 and timeout 0. // i.e. you can create an IPv6 IPTables using a timeout of 5 seconds passing // the IPFamily and Timeout options as follow: // @@ -117,13 +135,21 @@ func New(opts ...option) (*IPTables, error) { ipt := &IPTables{ proto: ProtocolIPv4, timeout: 0, + path: "", } for _, opt := range opts { opt(ipt) } - path, err := exec.LookPath(getIptablesCommand(ipt.proto)) + // if path wasn't preset through New(Path()), autodiscover it + cmd := "" + if ipt.path == "" { + cmd = getIptablesCommand(ipt.proto) + } else { + cmd = ipt.path + } + path, err := exec.LookPath(cmd) if err != nil { return nil, err } @@ -241,6 +267,12 @@ func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) err return err } +// DeleteById deletes the rule with the specified ID in the given table and chain. 
+func (ipt *IPTables) DeleteById(table, chain string, id int) error { + cmd := []string{"-t", table, "-D", chain, strconv.Itoa(id)} + return ipt.run(cmd...) +} + // List rules in specified table/chain func (ipt *IPTables) ListById(table, chain string, id int) (string, error) { args := []string{"-t", table, "-S", chain, strconv.Itoa(id)} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go new file mode 100644 index 0000000000..0ec4b12c75 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go @@ -0,0 +1,62 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +func fmtListFlags(flags blackfriday.ListType) string { + knownFlags := []struct { + name string + flag blackfriday.ListType + }{ + {"ListTypeOrdered", blackfriday.ListTypeOrdered}, + {"ListTypeDefinition", blackfriday.ListTypeDefinition}, + {"ListTypeTerm", blackfriday.ListTypeTerm}, + {"ListItemContainsBlock", blackfriday.ListItemContainsBlock}, + {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList}, + {"ListItemEndOfList", blackfriday.ListItemEndOfList}, + } + + var f []string + for _, kf := range knownFlags { + if flags&kf.flag != 0 { + f = append(f, kf.name) + flags &^= kf.flag + } + } + if flags != 0 { + f = append(f, fmt.Sprintf("Unknown(%#x)", flags)) + } + return strings.Join(f, "|") +} + +type debugDecorator struct { + blackfriday.Renderer +} + +func depth(node *blackfriday.Node) int { + d := 0 + for n := node.Parent; n != nil; n = n.Parent { + d++ + } + return d +} + +func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + fmt.Fprintf(os.Stderr, "%s%s %v %v\n", + strings.Repeat(" ", depth(node)), + map[bool]string{true: "+", false: "-"}[entering], + node, + fmtListFlags(node.ListFlags)) + var b strings.Builder + status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering) 
+ if b.Len() > 0 { + fmt.Fprintf(os.Stderr, ">> %q\n", b.String()) + } + return status +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index 42bf32aab0..62d91b77d5 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -1,16 +1,23 @@ package md2man import ( + "os" + "strconv" + "github.com/russross/blackfriday/v2" ) // Render converts a markdown document into a roff formatted document. func Render(doc []byte) []byte { renderer := NewRoffRenderer() + var r blackfriday.Renderer = renderer + if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v { + r = &debugDecorator{Renderer: r} + } return blackfriday.Run(doc, []blackfriday.Option{ - blackfriday.WithRenderer(renderer), + blackfriday.WithRenderer(r), blackfriday.WithExtensions(renderer.GetExtensions()), }...) } diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 8a290f1972..96a80c99b8 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -14,10 +14,8 @@ import ( // roffRenderer implements the blackfriday.Renderer interface for creating // roff format (manpages) from markdown text type roffRenderer struct { - extensions blackfriday.Extensions listCounters []int firstHeader bool - firstDD bool listDepth int } @@ -43,7 +41,7 @@ const ( quoteTag = "\n.PP\n.RS\n" quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" + listCloseTag = ".RE\n" dtTag = "\n.TP\n" dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" @@ -56,23 +54,18 @@ const ( // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents // from markdown func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions 
|= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } + return &roffRenderer{} } // GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions +func (*roffRenderer) GetExtensions() blackfriday.Extensions { + return blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.SpaceHeadings | + blackfriday.Footnotes | + blackfriday.Titleblock | + blackfriday.DefinitionLists } // RenderHeader handles outputting the header at document start @@ -103,7 +96,23 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - escapeSpecialChars(w, node.Literal) + // Special case: format the NAME section as required for proper whatis parsing. + // Refer to the lexgrog(1) and groff_man(7) manual pages for details. 
+ if node.Parent != nil && + node.Parent.Type == blackfriday.Paragraph && + node.Parent.Prev != nil && + node.Parent.Prev.Type == blackfriday.Heading && + node.Parent.Prev.FirstChild != nil && + bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { + before, after, found := bytesCut(node.Literal, []byte(" - ")) + escapeSpecialChars(w, before) + if found { + out(w, ` \- `) + escapeSpecialChars(w, after) + } + } else { + escapeSpecialChars(w, node.Literal) + } case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -141,14 +150,25 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering case blackfriday.Document: break case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } if entering { - out(w, paraTag) + if r.listDepth > 0 { + // roff .PP markers break lists + if node.Prev != nil { // continued paragraph + if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, crTag) + } + } + } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading { + out(w, crTag) + } else { + out(w, paraTag) + } } else { - out(w, crTag) + if node.Next == nil || node.Next.Type != blackfriday.List { + out(w, crTag) + } } case blackfriday.BlockQuote: if entering { @@ -211,6 +231,10 @@ func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, enteri func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { openTag := listTag closeTag := listCloseTag + if (entering && r.listDepth == 0) || (!entering && r.listDepth == 1) { + openTag = crTag + closeTag = "" + } if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // tags for definition lists handled within Item node openTag = "" @@ -239,23 +263,25 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { // DT 
(definition term): line just before DD (see below). out(w, dtTag) - r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // DD (definition description): line that starts with ": ". // // We have to distinguish between the first DD and the // subsequent ones, as there should be no vertical // whitespace between the DT and the first DD. - if r.firstDD { - r.firstDD = false - } else { - out(w, dd2Tag) + if node.Prev != nil && node.Prev.ListFlags&(blackfriday.ListTypeTerm|blackfriday.ListTypeDefinition) == blackfriday.ListTypeDefinition { + if node.Prev.Type == blackfriday.Item && + node.Prev.LastChild != nil && + node.Prev.LastChild.Type == blackfriday.List && + node.Prev.LastChild.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, dd2Tag) + } } } else { out(w, ".IP \\(bu 2\n") } - } else { - out(w, "\n") } } @@ -380,3 +406,12 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) { w.Write([]byte{'\\', text[i]}) // nolint: errcheck } } + +// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17 +// and older. We can remove this once we drop support for go1.17 and older. 
+func bytesCut(s, sep []byte) (before, after []byte, found bool) { + if i := bytes.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, nil, false +} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca9..6f24dfff56 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,26 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) + +## [v3.12.1] - 2024-05-28 + +- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen) + +## [v3.12.0] - 2024-03-11 + +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 + +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index e3e30080ec..3fb40d1980 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,9 +2,8 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) @@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. 
`go build -tags=jsoniter .` -- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99f..80adf55fdf 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. +func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5f1..6fd2bcd5a1 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. 
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. 
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824f5..9808752acd 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. 
type EntityReaderWriter interface { // Read a serialized version of the value from the request. diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166a..0000000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7f..0000000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e94..7f04bd9053 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if httpRequest.ContentLength > 0 { - return 
nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && length == "" { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be77..a2056e2acb 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. 
func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 28e3516937..0000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! - -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. 
- -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... 
- original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... - - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. 
- -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... 
- combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! 
Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index 4bce5936d5..0000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,809 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return 
false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. -func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. 
-func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return 
errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". -func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx < 0 { - if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - 
- return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := 
con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } 
- - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. 
-func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - if len(doc) == 0 { - return doc, nil - } - - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 0000000000..e9bb0efe77 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). 
+// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. 
+// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. 
+ + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. 
+type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. 
+func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. 
If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. 
+ // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. 
+ z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
+ switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. 
+ for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. 
+func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if 
v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, 
v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. 
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { // must be a complete double-quoted literal
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 { // invalid UTF-8 byte: fall through to the slow path
+			break
+		}
+		r += size
+	}
+	if r == len(s) { // fast path: scanned the whole literal with nothing to rewrite
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax) // headroom so each iteration can write one full rune past the check below
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room? Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) { // trailing backslash: malformed literal
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'': // note: \' is accepted here although strict JSON does not define it
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r-- // back up to the '\\' so getu4 sees the full \uXXXX sequence
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:]) // -1 on failure, which DecodeRune maps to ReplacementChar
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:]) // invalid sequences decode as utf8.RuneError and are re-encoded below
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
new file mode 100644
index 0000000000..2e6eca4487
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
@@ -0,0 +1,1486 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 7159. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// So that the JSON will be safe to embed inside HTML