Skip to content

Instantly share code, notes, and snippets.

@nikhita
Last active August 27, 2023 07:47
Show Gist options
  • Star 7 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save nikhita/deec6b19ed895e0b4df90b1d812b45b6 to your computer and use it in GitHub Desktop.
Save nikhita/deec6b19ed895e0b4df90b1d812b45b6 to your computer and use it in GitHub Desktop.

Kubernetes API Internals Codebase Walkthrough

The JSON file included in this gist gives a codebase walkthrough of the resource handling in the k8s apiserver.

The codebase walkthrough was created using the CodeTour VS Code extension. To display the walkthrough in VS Code, open the JSON file using the Opening Tours functionality.

You might need to first open the workspace at the apiserver directory.

{
"$schema": "https://aka.ms/codetour-schema",
"title": "Kubernetes API internals",
"steps": [
{
"directory": "pkg/endpoints/handlers",
"description": "We will first look at how the Kubernetes apiserver creates a resource.\n\nThe REST handlers for the apiserver are located in the [staging/src/k8s.io/apiserver/pkg/endpoints/handlers](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/apiserver/pkg/endpoints/handlers) directory"
},
{
"file": "pkg/endpoints/handlers/rest.go",
"description": "The `RequestScope` contains methods for conversion, defaulting, etc.",
"line": 69,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\tgrpccodes \"google.golang.org/grpc/codes\"\n\tgrpcstatus \"google.golang.org/grpc/status\"\n\n\tapiequality \"k8s.io/apimachinery/pkg/api/equality\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tmetav1beta1 \"k8s.io/apimachinery/pkg/apis/meta/v1beta1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/authorization/authorizer\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n\t\"k8s.io/apiserver/pkg/endpoints/metrics\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/apiserver/pkg/warning\"\n\t\"k8s.io/klog/v2\"\n)\n\nconst (\n\t// 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout.\n\t// Everyone chooses 30.\n\trequestTimeoutUpperBound = 34 * time.Second\n\t// DuplicateOwnerReferencesWarningFormat is the warning 
that a client receives when a create/update request contains\n\t// duplicate owner reference entries.\n\tDuplicateOwnerReferencesWarningFormat = \".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n\t// DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed\n\t// after mutating admission.\n\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t// For PATCH request the API server only dedups after mutating admission.\n\tDuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = \".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n)\n\n// RequestScope encapsulates common fields across all RESTful handler methods.\ntype RequestScope struct {\n\tNamer ScopeNamer\n\n\tSerializer runtime.NegotiatedSerializer\n\truntime.ParameterCodec\n\n\t// StandardSerializers, if set, restricts which serializers can be used when\n\t// we aren't transforming the output (into Table or PartialObjectMetadata).\n\t// Used only by CRDs which do not yet support Protobuf.\n\tStandardSerializers []runtime.SerializerInfo\n\n\tCreater runtime.ObjectCreater\n\tConvertor runtime.ObjectConvertor\n\tDefaulter runtime.ObjectDefaulter\n\tTyper runtime.ObjectTyper\n\tUnsafeConvertor runtime.ObjectConvertor\n\tAuthorizer authorizer.Authorizer\n\n\tEquivalentResourceMapper runtime.EquivalentResourceMapper\n\n\tTableConvertor rest.TableConvertor\n\tFieldManager *fieldmanager.FieldManager\n\n\tResource schema.GroupVersionResource\n\tKind schema.GroupVersionKind\n\n\t// AcceptsGroupVersionDelegate is an optional delegate that can be queried about whether a given GVK\n\t// 
can be accepted in create or update requests. If nil, only scope.Kind is accepted.\n\t// Note that this does not enable multi-version support for reads from a single endpoint.\n\tAcceptsGroupVersionDelegate rest.GroupVersionAcceptor\n\n\tSubresource string\n\n\tMetaGroupVersion schema.GroupVersion\n\n\t// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.\n\tHubGroupVersion schema.GroupVersion\n\n\tMaxRequestBodyBytes int64\n}\n\nfunc (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {\n\tresponsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)\n}\n\n// AcceptsGroupVersion returns true if the specified GroupVersion is allowed\n// in create and update requests.\nfunc (scope *RequestScope) AcceptsGroupVersion(gv schema.GroupVersion) bool {\n\t// If there's a custom acceptor, delegate to it. This is extremely rare.\n\tif scope.AcceptsGroupVersionDelegate != nil {\n\t\treturn scope.AcceptsGroupVersionDelegate.AcceptsGroupVersion(gv)\n\t}\n\t// Fall back to only allowing the singular Kind. 
This is the typical behavior.\n\treturn gv == scope.Kind.GroupVersion()\n}\n\nfunc (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool {\n\t// some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if\n\t// gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types.\n\tif gvk == nil {\n\t\tif len(scope.StandardSerializers) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tfor _, info := range scope.StandardSerializers {\n\t\t\tif info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time\n\tif gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion {\n\t\tswitch gvk.Kind {\n\t\tcase \"Table\":\n\t\t\treturn scope.TableConvertor != nil &&\n\t\t\t\tmimeType == \"application\" &&\n\t\t\t\t(mimeSubType == \"json\" || mimeSubType == \"yaml\")\n\t\tcase \"PartialObjectMetadata\", \"PartialObjectMetadataList\":\n\t\t\t// TODO: should delineate between lists and non-list endpoints\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (scope *RequestScope) AllowsServerVersion(version string) bool {\n\treturn version == scope.MetaGroupVersion.Version\n}\n\nfunc (scope *RequestScope) AllowsStreamSchema(s string) bool {\n\treturn s == \"watch\"\n}\n\nvar _ admission.ObjectInterfaces = &RequestScope{}\n\nfunc (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater }\nfunc (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer }\nfunc (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter }\nfunc (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor }\nfunc (r *RequestScope) 
GetEquivalentResourceMapper() runtime.EquivalentResourceMapper {\n\treturn r.EquivalentResourceMapper\n}\n\n// ConnectResource returns a function that handles a connect request on a rest.Storage object.\nfunc ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif isDryRun(req.URL) {\n\t\t\tscope.err(errors.NewBadRequest(\"dryRun is not supported\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tctx := req.Context()\n\t\tctx = request.WithNamespace(ctx, namespace)\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\n\t\topts, subpath, subpathKey := connecter.NewConnectOptions()\n\t\tif err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif admit != nil && admit.Handles(admission.Connect) {\n\t\t\tuserInfo, _ := request.UserFrom(ctx)\n\t\t\t// TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok {\n\t\t\t\terr = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil {\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif validatingAdmission, ok := admit.(admission.ValidationInterface); ok {\n\t\t\t\terr = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trequestInfo, _ := request.RequestInfoFrom(ctx)\n\t\tmetrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {\n\t\t\thandler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.ServeHTTP(w, req)\n\t\t})\n\t}\n}\n\n// responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tscope *RequestScope\n\treq *http.Request\n\tw http.ResponseWriter\n}\n\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj)\n}\n\nfunc (r *responder) Error(err error) {\n\tr.scope.err(err, r.w, r.req)\n}\n\n// transformDecodeError adds additional information into a bad-request api error when a decode fails.\nfunc transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {\n\tobjGVKs, _, err := typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(err.Error())\n\t}\n\tobjGVK := objGVKs[0]\n\tif gvk != nil && len(gvk.Kind) > 0 {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\"%s in version %q cannot be handled as a %s: %v\", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))\n\t}\n\tsummary := summarizeData(body, 30)\n\treturn errors.NewBadRequest(fmt.Sprintf(\"the object provided is unrecognized (must be of type %s): %v (%s)\", objGVK.Kind, baseErr, summary))\n}\n\n// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request\n// plus the path and query generated by the provided linkFunc\nfunc setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error {\n\t// TODO: SelfLink generation should return 
a full URL?\n\turi, err := namer.GenerateLink(requestInfo, obj)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn namer.SetSelfLink(obj, uri)\n}\n\nfunc hasUID(obj runtime.Object) (bool, error) {\n\tif obj == nil {\n\t\treturn false, nil\n\t}\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn false, errors.NewInternalError(err)\n\t}\n\tif len(accessor.GetUID()) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n// checkName checks the provided name against the request\nfunc checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {\n\tobjNamespace, objName, err := namer.ObjectName(obj)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s based on URL) was undeterminable: %v\", name, err))\n\t}\n\tif objName != name {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s) does not match the name on the URL (%s)\", objName, name))\n\t}\n\tif len(namespace) > 0 {\n\t\tif len(objNamespace) > 0 && objNamespace != namespace {\n\t\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\t\"the namespace of the object (%s) does not match the namespace on the request (%s)\", objNamespace, namespace))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// dedupOwnerReferences dedups owner references over the entire entry.\n// NOTE: We don't know enough about the existing cases of owner references\n// sharing the same UID but different fields. Nor do we know what might break.\n// In the future we may just dedup/reject owner references with the same UID.\nfunc dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) {\n\tvar result []metav1.OwnerReference\n\tvar duplicates []string\n\tseen := make(map[types.UID]struct{})\n\tfor _, ref := range refs {\n\t\t_, ok := seen[ref.UID]\n\t\t// Short-circuit if we haven't seen the UID before. 
Otherwise\n\t\t// check the entire list we have so far.\n\t\tif !ok || !hasOwnerReference(result, ref) {\n\t\t\tseen[ref.UID] = struct{}{}\n\t\t\tresult = append(result, ref)\n\t\t} else {\n\t\t\tduplicates = append(duplicates, string(ref.UID))\n\t\t}\n\t}\n\treturn result, duplicates\n}\n\n// hasOwnerReference returns true if refs has an item equal to ref. The function\n// focuses on semantic equality instead of memory equality, to catch duplicates\n// with different pointer addresses. The function uses apiequality.Semantic\n// instead of implementing its own comparison, to tolerate API changes to\n// metav1.OwnerReference.\n// NOTE: This is expensive, but we accept it because we've made sure it only\n// happens to owner references containing duplicate UIDs, plus typically the\n// number of items in the list should be small.\nfunc hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool {\n\tfor _, r := range refs {\n\t\tif apiequality.Semantic.DeepEqual(r, ref) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata.\n// If duplicates are found, the function records a warning to the provided context.\nfunc dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\t// The object doesn't have metadata. 
Nothing we need to do here.\n\t\treturn\n\t}\n\trefs := accessor.GetOwnerReferences()\n\tdeduped, duplicates := dedupOwnerReferences(refs)\n\tif len(duplicates) > 0 {\n\t\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t\t// For PATCH request the API server only dedups after mutating admission.\n\t\tformat := DuplicateOwnerReferencesWarningFormat\n\t\tif afterMutatingAdmission {\n\t\t\tformat = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat\n\t\t}\n\t\twarning.AddWarning(requestContext, \"\", fmt.Sprintf(format,\n\t\t\tstrings.Join(duplicates, \", \")))\n\t\taccessor.SetOwnerReferences(deduped)\n\t}\n}\n\n// setObjectSelfLink sets the self link of an object as needed.\n// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List\n// interfaces\nfunc setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {\n\tif utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {\n\t\t// Ensure that for empty lists we don't return <nil> items.\n\t\tif meta.IsListType(obj) && meta.LenList(obj) == 0 {\n\t\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// We only generate list links on objects that implement ListInterface - historically we duck typed this\n\t// check via reflection, but as we move away from reflection we require that you not only carry Items but\n\t// ListMeta into order to be identified as a list.\n\tif !meta.IsListType(obj) {\n\t\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t\t}\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t}\n\n\turi, err := namer.GenerateListLink(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := namer.SetSelfLink(obj, uri); err != nil {\n\t\tklog.V(4).InfoS(\"Unable to set self link on object\", \"error\", 
err)\n\t}\n\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t}\n\n\tcount := 0\n\terr = meta.EachListItem(obj, func(obj runtime.Object) error {\n\t\tcount++\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t})\n\n\tif count == 0 {\n\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc summarizeData(data []byte, maxLength int) string {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn \"<empty>\"\n\tcase data[0] == '{':\n\t\tif len(data) > maxLength {\n\t\t\treturn string(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn string(data)\n\tdefault:\n\t\tif len(data) > maxLength {\n\t\t\treturn hex.EncodeToString(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn hex.EncodeToString(data)\n\t}\n}\n\nfunc limitedReadBody(req *http.Request, limit int64) ([]byte, error) {\n\tdefer req.Body.Close()\n\tif limit <= 0 {\n\t\treturn ioutil.ReadAll(req.Body)\n\t}\n\tlr := &io.LimitedReader{\n\t\tR: req.Body,\n\t\tN: limit + 1,\n\t}\n\tdata, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lr.N <= 0 {\n\t\treturn nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf(\"limit is %d\", limit))\n\t}\n\treturn data, nil\n}\n\nfunc isDryRun(url *url.URL) bool {\n\treturn len(url.Query()[\"dryRun\"]) != 0\n}\n\ntype etcdError interface {\n\tCode() grpccodes.Code\n\tError() string\n}\n\ntype grpcError interface {\n\tGRPCStatus() *grpcstatus.Status\n}\n\nfunc isTooLargeError(err error) bool {\n\tif err != nil {\n\t\tif etcdErr, ok := err.(etcdError); ok {\n\t\t\tif etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == \"etcdserver: request is too large\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif grpcErr, ok := err.(grpcError); ok {\n\t\t\tif grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), \"trying to send message larger than max\") {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
},
{
"file": "pkg/endpoints/handlers/rest.go",
"description": "The `NegotiatedSerializer` is an interface used for obtaining encoders and decoders for multiple supported media types like json, yaml, etc.",
"line": 72,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\tgrpccodes \"google.golang.org/grpc/codes\"\n\tgrpcstatus \"google.golang.org/grpc/status\"\n\n\tapiequality \"k8s.io/apimachinery/pkg/api/equality\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tmetav1beta1 \"k8s.io/apimachinery/pkg/apis/meta/v1beta1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/authorization/authorizer\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n\t\"k8s.io/apiserver/pkg/endpoints/metrics\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/apiserver/pkg/warning\"\n\t\"k8s.io/klog/v2\"\n)\n\nconst (\n\t// 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout.\n\t// Everyone chooses 30.\n\trequestTimeoutUpperBound = 34 * time.Second\n\t// DuplicateOwnerReferencesWarningFormat is the warning 
that a client receives when a create/update request contains\n\t// duplicate owner reference entries.\n\tDuplicateOwnerReferencesWarningFormat = \".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n\t// DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed\n\t// after mutating admission.\n\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t// For PATCH request the API server only dedups after mutating admission.\n\tDuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = \".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n)\n\n// RequestScope encapsulates common fields across all RESTful handler methods.\ntype RequestScope struct {\n\tNamer ScopeNamer\n\n\tSerializer runtime.NegotiatedSerializer\n\truntime.ParameterCodec\n\n\t// StandardSerializers, if set, restricts which serializers can be used when\n\t// we aren't transforming the output (into Table or PartialObjectMetadata).\n\t// Used only by CRDs which do not yet support Protobuf.\n\tStandardSerializers []runtime.SerializerInfo\n\n\tCreater runtime.ObjectCreater\n\tConvertor runtime.ObjectConvertor\n\tDefaulter runtime.ObjectDefaulter\n\tTyper runtime.ObjectTyper\n\tUnsafeConvertor runtime.ObjectConvertor\n\tAuthorizer authorizer.Authorizer\n\n\tEquivalentResourceMapper runtime.EquivalentResourceMapper\n\n\tTableConvertor rest.TableConvertor\n\tFieldManager *fieldmanager.FieldManager\n\n\tResource schema.GroupVersionResource\n\tKind schema.GroupVersionKind\n\n\t// AcceptsGroupVersionDelegate is an optional delegate that can be queried about whether a given GVK\n\t// 
can be accepted in create or update requests. If nil, only scope.Kind is accepted.\n\t// Note that this does not enable multi-version support for reads from a single endpoint.\n\tAcceptsGroupVersionDelegate rest.GroupVersionAcceptor\n\n\tSubresource string\n\n\tMetaGroupVersion schema.GroupVersion\n\n\t// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.\n\tHubGroupVersion schema.GroupVersion\n\n\tMaxRequestBodyBytes int64\n}\n\nfunc (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {\n\tresponsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)\n}\n\n// AcceptsGroupVersion returns true if the specified GroupVersion is allowed\n// in create and update requests.\nfunc (scope *RequestScope) AcceptsGroupVersion(gv schema.GroupVersion) bool {\n\t// If there's a custom acceptor, delegate to it. This is extremely rare.\n\tif scope.AcceptsGroupVersionDelegate != nil {\n\t\treturn scope.AcceptsGroupVersionDelegate.AcceptsGroupVersion(gv)\n\t}\n\t// Fall back to only allowing the singular Kind. 
This is the typical behavior.\n\treturn gv == scope.Kind.GroupVersion()\n}\n\nfunc (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool {\n\t// some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if\n\t// gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types.\n\tif gvk == nil {\n\t\tif len(scope.StandardSerializers) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tfor _, info := range scope.StandardSerializers {\n\t\t\tif info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time\n\tif gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion {\n\t\tswitch gvk.Kind {\n\t\tcase \"Table\":\n\t\t\treturn scope.TableConvertor != nil &&\n\t\t\t\tmimeType == \"application\" &&\n\t\t\t\t(mimeSubType == \"json\" || mimeSubType == \"yaml\")\n\t\tcase \"PartialObjectMetadata\", \"PartialObjectMetadataList\":\n\t\t\t// TODO: should delineate between lists and non-list endpoints\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (scope *RequestScope) AllowsServerVersion(version string) bool {\n\treturn version == scope.MetaGroupVersion.Version\n}\n\nfunc (scope *RequestScope) AllowsStreamSchema(s string) bool {\n\treturn s == \"watch\"\n}\n\nvar _ admission.ObjectInterfaces = &RequestScope{}\n\nfunc (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater }\nfunc (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer }\nfunc (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter }\nfunc (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor }\nfunc (r *RequestScope) 
GetEquivalentResourceMapper() runtime.EquivalentResourceMapper {\n\treturn r.EquivalentResourceMapper\n}\n\n// ConnectResource returns a function that handles a connect request on a rest.Storage object.\nfunc ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif isDryRun(req.URL) {\n\t\t\tscope.err(errors.NewBadRequest(\"dryRun is not supported\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tctx := req.Context()\n\t\tctx = request.WithNamespace(ctx, namespace)\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\n\t\topts, subpath, subpathKey := connecter.NewConnectOptions()\n\t\tif err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif admit != nil && admit.Handles(admission.Connect) {\n\t\t\tuserInfo, _ := request.UserFrom(ctx)\n\t\t\t// TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok {\n\t\t\t\terr = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil {\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif validatingAdmission, ok := admit.(admission.ValidationInterface); ok {\n\t\t\t\terr = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trequestInfo, _ := request.RequestInfoFrom(ctx)\n\t\tmetrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {\n\t\t\thandler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.ServeHTTP(w, req)\n\t\t})\n\t}\n}\n\n// responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tscope *RequestScope\n\treq *http.Request\n\tw http.ResponseWriter\n}\n\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj)\n}\n\nfunc (r *responder) Error(err error) {\n\tr.scope.err(err, r.w, r.req)\n}\n\n// transformDecodeError adds additional information into a bad-request api error when a decode fails.\nfunc transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {\n\tobjGVKs, _, err := typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(err.Error())\n\t}\n\tobjGVK := objGVKs[0]\n\tif gvk != nil && len(gvk.Kind) > 0 {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\"%s in version %q cannot be handled as a %s: %v\", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))\n\t}\n\tsummary := summarizeData(body, 30)\n\treturn errors.NewBadRequest(fmt.Sprintf(\"the object provided is unrecognized (must be of type %s): %v (%s)\", objGVK.Kind, baseErr, summary))\n}\n\n// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request\n// plus the path and query generated by the provided linkFunc\nfunc setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error {\n\t// TODO: SelfLink generation should return 
a full URL?\n\turi, err := namer.GenerateLink(requestInfo, obj)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn namer.SetSelfLink(obj, uri)\n}\n\nfunc hasUID(obj runtime.Object) (bool, error) {\n\tif obj == nil {\n\t\treturn false, nil\n\t}\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn false, errors.NewInternalError(err)\n\t}\n\tif len(accessor.GetUID()) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n// checkName checks the provided name against the request\nfunc checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {\n\tobjNamespace, objName, err := namer.ObjectName(obj)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s based on URL) was undeterminable: %v\", name, err))\n\t}\n\tif objName != name {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s) does not match the name on the URL (%s)\", objName, name))\n\t}\n\tif len(namespace) > 0 {\n\t\tif len(objNamespace) > 0 && objNamespace != namespace {\n\t\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\t\"the namespace of the object (%s) does not match the namespace on the request (%s)\", objNamespace, namespace))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// dedupOwnerReferences dedups owner references over the entire entry.\n// NOTE: We don't know enough about the existing cases of owner references\n// sharing the same UID but different fields. Nor do we know what might break.\n// In the future we may just dedup/reject owner references with the same UID.\nfunc dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) {\n\tvar result []metav1.OwnerReference\n\tvar duplicates []string\n\tseen := make(map[types.UID]struct{})\n\tfor _, ref := range refs {\n\t\t_, ok := seen[ref.UID]\n\t\t// Short-circuit if we haven't seen the UID before. 
Otherwise\n\t\t// check the entire list we have so far.\n\t\tif !ok || !hasOwnerReference(result, ref) {\n\t\t\tseen[ref.UID] = struct{}{}\n\t\t\tresult = append(result, ref)\n\t\t} else {\n\t\t\tduplicates = append(duplicates, string(ref.UID))\n\t\t}\n\t}\n\treturn result, duplicates\n}\n\n// hasOwnerReference returns true if refs has an item equal to ref. The function\n// focuses on semantic equality instead of memory equality, to catch duplicates\n// with different pointer addresses. The function uses apiequality.Semantic\n// instead of implementing its own comparison, to tolerate API changes to\n// metav1.OwnerReference.\n// NOTE: This is expensive, but we accept it because we've made sure it only\n// happens to owner references containing duplicate UIDs, plus typically the\n// number of items in the list should be small.\nfunc hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool {\n\tfor _, r := range refs {\n\t\tif apiequality.Semantic.DeepEqual(r, ref) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata.\n// If duplicates are found, the function records a warning to the provided context.\nfunc dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\t// The object doesn't have metadata. 
Nothing we need to do here.\n\t\treturn\n\t}\n\trefs := accessor.GetOwnerReferences()\n\tdeduped, duplicates := dedupOwnerReferences(refs)\n\tif len(duplicates) > 0 {\n\t\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t\t// For PATCH request the API server only dedups after mutating admission.\n\t\tformat := DuplicateOwnerReferencesWarningFormat\n\t\tif afterMutatingAdmission {\n\t\t\tformat = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat\n\t\t}\n\t\twarning.AddWarning(requestContext, \"\", fmt.Sprintf(format,\n\t\t\tstrings.Join(duplicates, \", \")))\n\t\taccessor.SetOwnerReferences(deduped)\n\t}\n}\n\n// setObjectSelfLink sets the self link of an object as needed.\n// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List\n// interfaces\nfunc setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {\n\tif utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {\n\t\t// Ensure that for empty lists we don't return <nil> items.\n\t\tif meta.IsListType(obj) && meta.LenList(obj) == 0 {\n\t\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// We only generate list links on objects that implement ListInterface - historically we duck typed this\n\t// check via reflection, but as we move away from reflection we require that you not only carry Items but\n\t// ListMeta into order to be identified as a list.\n\tif !meta.IsListType(obj) {\n\t\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t\t}\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t}\n\n\turi, err := namer.GenerateListLink(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := namer.SetSelfLink(obj, uri); err != nil {\n\t\tklog.V(4).InfoS(\"Unable to set self link on object\", \"error\", 
err)\n\t}\n\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t}\n\n\tcount := 0\n\terr = meta.EachListItem(obj, func(obj runtime.Object) error {\n\t\tcount++\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t})\n\n\tif count == 0 {\n\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc summarizeData(data []byte, maxLength int) string {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn \"<empty>\"\n\tcase data[0] == '{':\n\t\tif len(data) > maxLength {\n\t\t\treturn string(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn string(data)\n\tdefault:\n\t\tif len(data) > maxLength {\n\t\t\treturn hex.EncodeToString(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn hex.EncodeToString(data)\n\t}\n}\n\nfunc limitedReadBody(req *http.Request, limit int64) ([]byte, error) {\n\tdefer req.Body.Close()\n\tif limit <= 0 {\n\t\treturn ioutil.ReadAll(req.Body)\n\t}\n\tlr := &io.LimitedReader{\n\t\tR: req.Body,\n\t\tN: limit + 1,\n\t}\n\tdata, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lr.N <= 0 {\n\t\treturn nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf(\"limit is %d\", limit))\n\t}\n\treturn data, nil\n}\n\nfunc isDryRun(url *url.URL) bool {\n\treturn len(url.Query()[\"dryRun\"]) != 0\n}\n\ntype etcdError interface {\n\tCode() grpccodes.Code\n\tError() string\n}\n\ntype grpcError interface {\n\tGRPCStatus() *grpcstatus.Status\n}\n\nfunc isTooLargeError(err error) bool {\n\tif err != nil {\n\t\tif etcdErr, ok := err.(etcdError); ok {\n\t\t\tif etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == \"etcdserver: request is too large\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif grpcErr, ok := err.(grpcError); ok {\n\t\t\tif grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), \"trying to send message larger than max\") {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
},
{
"file": "pkg/endpoints/handlers/rest.go",
"description": "The `ObjectConvertor` allows converting objects from one API version to another.\n\nExample: from the v1 version to the internal version.",
"line": 81,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\tgrpccodes \"google.golang.org/grpc/codes\"\n\tgrpcstatus \"google.golang.org/grpc/status\"\n\n\tapiequality \"k8s.io/apimachinery/pkg/api/equality\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tmetav1beta1 \"k8s.io/apimachinery/pkg/apis/meta/v1beta1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/authorization/authorizer\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n\t\"k8s.io/apiserver/pkg/endpoints/metrics\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/apiserver/pkg/warning\"\n\t\"k8s.io/klog/v2\"\n)\n\nconst (\n\t// 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout.\n\t// Everyone chooses 30.\n\trequestTimeoutUpperBound = 34 * time.Second\n\t// DuplicateOwnerReferencesWarningFormat is the warning 
that a client receives when a create/update request contains\n\t// duplicate owner reference entries.\n\tDuplicateOwnerReferencesWarningFormat = \".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n\t// DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed\n\t// after mutating admission.\n\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t// For PATCH request the API server only dedups after mutating admission.\n\tDuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = \".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n)\n\n// RequestScope encapsulates common fields across all RESTful handler methods.\ntype RequestScope struct {\n\tNamer ScopeNamer\n\n\tSerializer runtime.NegotiatedSerializer\n\truntime.ParameterCodec\n\n\t// StandardSerializers, if set, restricts which serializers can be used when\n\t// we aren't transforming the output (into Table or PartialObjectMetadata).\n\t// Used only by CRDs which do not yet support Protobuf.\n\tStandardSerializers []runtime.SerializerInfo\n\n\tCreater runtime.ObjectCreater\n\tConvertor runtime.ObjectConvertor\n\tDefaulter runtime.ObjectDefaulter\n\tTyper runtime.ObjectTyper\n\tUnsafeConvertor runtime.ObjectConvertor\n\tAuthorizer authorizer.Authorizer\n\n\tEquivalentResourceMapper runtime.EquivalentResourceMapper\n\n\tTableConvertor rest.TableConvertor\n\tFieldManager *fieldmanager.FieldManager\n\n\tResource schema.GroupVersionResource\n\tKind schema.GroupVersionKind\n\n\t// AcceptsGroupVersionDelegate is an optional delegate that can be queried about whether a given GVK\n\t// 
can be accepted in create or update requests. If nil, only scope.Kind is accepted.\n\t// Note that this does not enable multi-version support for reads from a single endpoint.\n\tAcceptsGroupVersionDelegate rest.GroupVersionAcceptor\n\n\tSubresource string\n\n\tMetaGroupVersion schema.GroupVersion\n\n\t// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.\n\tHubGroupVersion schema.GroupVersion\n\n\tMaxRequestBodyBytes int64\n}\n\nfunc (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {\n\tresponsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)\n}\n\n// AcceptsGroupVersion returns true if the specified GroupVersion is allowed\n// in create and update requests.\nfunc (scope *RequestScope) AcceptsGroupVersion(gv schema.GroupVersion) bool {\n\t// If there's a custom acceptor, delegate to it. This is extremely rare.\n\tif scope.AcceptsGroupVersionDelegate != nil {\n\t\treturn scope.AcceptsGroupVersionDelegate.AcceptsGroupVersion(gv)\n\t}\n\t// Fall back to only allowing the singular Kind. 
This is the typical behavior.\n\treturn gv == scope.Kind.GroupVersion()\n}\n\nfunc (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool {\n\t// some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if\n\t// gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types.\n\tif gvk == nil {\n\t\tif len(scope.StandardSerializers) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tfor _, info := range scope.StandardSerializers {\n\t\t\tif info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time\n\tif gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion {\n\t\tswitch gvk.Kind {\n\t\tcase \"Table\":\n\t\t\treturn scope.TableConvertor != nil &&\n\t\t\t\tmimeType == \"application\" &&\n\t\t\t\t(mimeSubType == \"json\" || mimeSubType == \"yaml\")\n\t\tcase \"PartialObjectMetadata\", \"PartialObjectMetadataList\":\n\t\t\t// TODO: should delineate between lists and non-list endpoints\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (scope *RequestScope) AllowsServerVersion(version string) bool {\n\treturn version == scope.MetaGroupVersion.Version\n}\n\nfunc (scope *RequestScope) AllowsStreamSchema(s string) bool {\n\treturn s == \"watch\"\n}\n\nvar _ admission.ObjectInterfaces = &RequestScope{}\n\nfunc (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater }\nfunc (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer }\nfunc (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter }\nfunc (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor }\nfunc (r *RequestScope) 
GetEquivalentResourceMapper() runtime.EquivalentResourceMapper {\n\treturn r.EquivalentResourceMapper\n}\n\n// ConnectResource returns a function that handles a connect request on a rest.Storage object.\nfunc ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif isDryRun(req.URL) {\n\t\t\tscope.err(errors.NewBadRequest(\"dryRun is not supported\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tctx := req.Context()\n\t\tctx = request.WithNamespace(ctx, namespace)\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\n\t\topts, subpath, subpathKey := connecter.NewConnectOptions()\n\t\tif err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif admit != nil && admit.Handles(admission.Connect) {\n\t\t\tuserInfo, _ := request.UserFrom(ctx)\n\t\t\t// TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok {\n\t\t\t\terr = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil {\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif validatingAdmission, ok := admit.(admission.ValidationInterface); ok {\n\t\t\t\terr = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trequestInfo, _ := request.RequestInfoFrom(ctx)\n\t\tmetrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {\n\t\t\thandler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.ServeHTTP(w, req)\n\t\t})\n\t}\n}\n\n// responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tscope *RequestScope\n\treq *http.Request\n\tw http.ResponseWriter\n}\n\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj)\n}\n\nfunc (r *responder) Error(err error) {\n\tr.scope.err(err, r.w, r.req)\n}\n\n// transformDecodeError adds additional information into a bad-request api error when a decode fails.\nfunc transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {\n\tobjGVKs, _, err := typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(err.Error())\n\t}\n\tobjGVK := objGVKs[0]\n\tif gvk != nil && len(gvk.Kind) > 0 {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\"%s in version %q cannot be handled as a %s: %v\", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))\n\t}\n\tsummary := summarizeData(body, 30)\n\treturn errors.NewBadRequest(fmt.Sprintf(\"the object provided is unrecognized (must be of type %s): %v (%s)\", objGVK.Kind, baseErr, summary))\n}\n\n// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request\n// plus the path and query generated by the provided linkFunc\nfunc setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error {\n\t// TODO: SelfLink generation should return 
a full URL?\n\turi, err := namer.GenerateLink(requestInfo, obj)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn namer.SetSelfLink(obj, uri)\n}\n\nfunc hasUID(obj runtime.Object) (bool, error) {\n\tif obj == nil {\n\t\treturn false, nil\n\t}\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn false, errors.NewInternalError(err)\n\t}\n\tif len(accessor.GetUID()) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n// checkName checks the provided name against the request\nfunc checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {\n\tobjNamespace, objName, err := namer.ObjectName(obj)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s based on URL) was undeterminable: %v\", name, err))\n\t}\n\tif objName != name {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s) does not match the name on the URL (%s)\", objName, name))\n\t}\n\tif len(namespace) > 0 {\n\t\tif len(objNamespace) > 0 && objNamespace != namespace {\n\t\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\t\"the namespace of the object (%s) does not match the namespace on the request (%s)\", objNamespace, namespace))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// dedupOwnerReferences dedups owner references over the entire entry.\n// NOTE: We don't know enough about the existing cases of owner references\n// sharing the same UID but different fields. Nor do we know what might break.\n// In the future we may just dedup/reject owner references with the same UID.\nfunc dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) {\n\tvar result []metav1.OwnerReference\n\tvar duplicates []string\n\tseen := make(map[types.UID]struct{})\n\tfor _, ref := range refs {\n\t\t_, ok := seen[ref.UID]\n\t\t// Short-circuit if we haven't seen the UID before. 
Otherwise\n\t\t// check the entire list we have so far.\n\t\tif !ok || !hasOwnerReference(result, ref) {\n\t\t\tseen[ref.UID] = struct{}{}\n\t\t\tresult = append(result, ref)\n\t\t} else {\n\t\t\tduplicates = append(duplicates, string(ref.UID))\n\t\t}\n\t}\n\treturn result, duplicates\n}\n\n// hasOwnerReference returns true if refs has an item equal to ref. The function\n// focuses on semantic equality instead of memory equality, to catch duplicates\n// with different pointer addresses. The function uses apiequality.Semantic\n// instead of implementing its own comparison, to tolerate API changes to\n// metav1.OwnerReference.\n// NOTE: This is expensive, but we accept it because we've made sure it only\n// happens to owner references containing duplicate UIDs, plus typically the\n// number of items in the list should be small.\nfunc hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool {\n\tfor _, r := range refs {\n\t\tif apiequality.Semantic.DeepEqual(r, ref) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata.\n// If duplicates are found, the function records a warning to the provided context.\nfunc dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\t// The object doesn't have metadata. 
Nothing we need to do here.\n\t\treturn\n\t}\n\trefs := accessor.GetOwnerReferences()\n\tdeduped, duplicates := dedupOwnerReferences(refs)\n\tif len(duplicates) > 0 {\n\t\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t\t// For PATCH request the API server only dedups after mutating admission.\n\t\tformat := DuplicateOwnerReferencesWarningFormat\n\t\tif afterMutatingAdmission {\n\t\t\tformat = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat\n\t\t}\n\t\twarning.AddWarning(requestContext, \"\", fmt.Sprintf(format,\n\t\t\tstrings.Join(duplicates, \", \")))\n\t\taccessor.SetOwnerReferences(deduped)\n\t}\n}\n\n// setObjectSelfLink sets the self link of an object as needed.\n// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List\n// interfaces\nfunc setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {\n\tif utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {\n\t\t// Ensure that for empty lists we don't return <nil> items.\n\t\tif meta.IsListType(obj) && meta.LenList(obj) == 0 {\n\t\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// We only generate list links on objects that implement ListInterface - historically we duck typed this\n\t// check via reflection, but as we move away from reflection we require that you not only carry Items but\n\t// ListMeta into order to be identified as a list.\n\tif !meta.IsListType(obj) {\n\t\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t\t}\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t}\n\n\turi, err := namer.GenerateListLink(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := namer.SetSelfLink(obj, uri); err != nil {\n\t\tklog.V(4).InfoS(\"Unable to set self link on object\", \"error\", 
err)\n\t}\n\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t}\n\n\tcount := 0\n\terr = meta.EachListItem(obj, func(obj runtime.Object) error {\n\t\tcount++\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t})\n\n\tif count == 0 {\n\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc summarizeData(data []byte, maxLength int) string {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn \"<empty>\"\n\tcase data[0] == '{':\n\t\tif len(data) > maxLength {\n\t\t\treturn string(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn string(data)\n\tdefault:\n\t\tif len(data) > maxLength {\n\t\t\treturn hex.EncodeToString(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn hex.EncodeToString(data)\n\t}\n}\n\nfunc limitedReadBody(req *http.Request, limit int64) ([]byte, error) {\n\tdefer req.Body.Close()\n\tif limit <= 0 {\n\t\treturn ioutil.ReadAll(req.Body)\n\t}\n\tlr := &io.LimitedReader{\n\t\tR: req.Body,\n\t\tN: limit + 1,\n\t}\n\tdata, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lr.N <= 0 {\n\t\treturn nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf(\"limit is %d\", limit))\n\t}\n\treturn data, nil\n}\n\nfunc isDryRun(url *url.URL) bool {\n\treturn len(url.Query()[\"dryRun\"]) != 0\n}\n\ntype etcdError interface {\n\tCode() grpccodes.Code\n\tError() string\n}\n\ntype grpcError interface {\n\tGRPCStatus() *grpcstatus.Status\n}\n\nfunc isTooLargeError(err error) bool {\n\tif err != nil {\n\t\tif etcdErr, ok := err.(etcdError); ok {\n\t\t\tif etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == \"etcdserver: request is too large\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif grpcErr, ok := err.(grpcError); ok {\n\t\t\tif grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), \"trying to send message larger than max\") {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
},
{
"file": "pkg/endpoints/handlers/rest.go",
"description": "The `ObjectDefaulter` applies default values to objects.",
"line": 82,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\tgrpccodes \"google.golang.org/grpc/codes\"\n\tgrpcstatus \"google.golang.org/grpc/status\"\n\n\tapiequality \"k8s.io/apimachinery/pkg/api/equality\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tmetav1beta1 \"k8s.io/apimachinery/pkg/apis/meta/v1beta1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/authorization/authorizer\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n\t\"k8s.io/apiserver/pkg/endpoints/metrics\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/apiserver/pkg/warning\"\n\t\"k8s.io/klog/v2\"\n)\n\nconst (\n\t// 34 chose as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout.\n\t// Everyone chooses 30.\n\trequestTimeoutUpperBound = 34 * time.Second\n\t// DuplicateOwnerReferencesWarningFormat is the warning 
that a client receives when a create/update request contains\n\t// duplicate owner reference entries.\n\tDuplicateOwnerReferencesWarningFormat = \".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n\t// DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed\n\t// after mutating admission.\n\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t// For PATCH request the API server only dedups after mutating admission.\n\tDuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = \".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v\"\n)\n\n// RequestScope encapsulates common fields across all RESTful handler methods.\ntype RequestScope struct {\n\tNamer ScopeNamer\n\n\tSerializer runtime.NegotiatedSerializer\n\truntime.ParameterCodec\n\n\t// StandardSerializers, if set, restricts which serializers can be used when\n\t// we aren't transforming the output (into Table or PartialObjectMetadata).\n\t// Used only by CRDs which do not yet support Protobuf.\n\tStandardSerializers []runtime.SerializerInfo\n\n\tCreater runtime.ObjectCreater\n\tConvertor runtime.ObjectConvertor\n\tDefaulter runtime.ObjectDefaulter\n\tTyper runtime.ObjectTyper\n\tUnsafeConvertor runtime.ObjectConvertor\n\tAuthorizer authorizer.Authorizer\n\n\tEquivalentResourceMapper runtime.EquivalentResourceMapper\n\n\tTableConvertor rest.TableConvertor\n\tFieldManager *fieldmanager.FieldManager\n\n\tResource schema.GroupVersionResource\n\tKind schema.GroupVersionKind\n\n\t// AcceptsGroupVersionDelegate is an optional delegate that can be queried about whether a given GVK\n\t// 
can be accepted in create or update requests. If nil, only scope.Kind is accepted.\n\t// Note that this does not enable multi-version support for reads from a single endpoint.\n\tAcceptsGroupVersionDelegate rest.GroupVersionAcceptor\n\n\tSubresource string\n\n\tMetaGroupVersion schema.GroupVersion\n\n\t// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.\n\tHubGroupVersion schema.GroupVersion\n\n\tMaxRequestBodyBytes int64\n}\n\nfunc (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {\n\tresponsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)\n}\n\n// AcceptsGroupVersion returns true if the specified GroupVersion is allowed\n// in create and update requests.\nfunc (scope *RequestScope) AcceptsGroupVersion(gv schema.GroupVersion) bool {\n\t// If there's a custom acceptor, delegate to it. This is extremely rare.\n\tif scope.AcceptsGroupVersionDelegate != nil {\n\t\treturn scope.AcceptsGroupVersionDelegate.AcceptsGroupVersion(gv)\n\t}\n\t// Fall back to only allowing the singular Kind. 
This is the typical behavior.\n\treturn gv == scope.Kind.GroupVersion()\n}\n\nfunc (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool {\n\t// some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if\n\t// gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types.\n\tif gvk == nil {\n\t\tif len(scope.StandardSerializers) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tfor _, info := range scope.StandardSerializers {\n\t\t\tif info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time\n\tif gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion {\n\t\tswitch gvk.Kind {\n\t\tcase \"Table\":\n\t\t\treturn scope.TableConvertor != nil &&\n\t\t\t\tmimeType == \"application\" &&\n\t\t\t\t(mimeSubType == \"json\" || mimeSubType == \"yaml\")\n\t\tcase \"PartialObjectMetadata\", \"PartialObjectMetadataList\":\n\t\t\t// TODO: should delineate between lists and non-list endpoints\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (scope *RequestScope) AllowsServerVersion(version string) bool {\n\treturn version == scope.MetaGroupVersion.Version\n}\n\nfunc (scope *RequestScope) AllowsStreamSchema(s string) bool {\n\treturn s == \"watch\"\n}\n\nvar _ admission.ObjectInterfaces = &RequestScope{}\n\nfunc (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater }\nfunc (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer }\nfunc (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter }\nfunc (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor }\nfunc (r *RequestScope) 
GetEquivalentResourceMapper() runtime.EquivalentResourceMapper {\n\treturn r.EquivalentResourceMapper\n}\n\n// ConnectResource returns a function that handles a connect request on a rest.Storage object.\nfunc ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tif isDryRun(req.URL) {\n\t\t\tscope.err(errors.NewBadRequest(\"dryRun is not supported\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tctx := req.Context()\n\t\tctx = request.WithNamespace(ctx, namespace)\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\n\t\topts, subpath, subpathKey := connecter.NewConnectOptions()\n\t\tif err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif admit != nil && admit.Handles(admission.Connect) {\n\t\t\tuserInfo, _ := request.UserFrom(ctx)\n\t\t\t// TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok {\n\t\t\t\terr = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil {\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif validatingAdmission, ok := admit.(admission.ValidationInterface); ok {\n\t\t\t\terr = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tscope.err(err, w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trequestInfo, _ := request.RequestInfoFrom(ctx)\n\t\tmetrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {\n\t\t\thandler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler.ServeHTTP(w, req)\n\t\t})\n\t}\n}\n\n// responder implements rest.Responder for assisting a connector in writing objects or errors.\ntype responder struct {\n\tscope *RequestScope\n\treq *http.Request\n\tw http.ResponseWriter\n}\n\nfunc (r *responder) Object(statusCode int, obj runtime.Object) {\n\tresponsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj)\n}\n\nfunc (r *responder) Error(err error) {\n\tr.scope.err(err, r.w, r.req)\n}\n\n// transformDecodeError adds additional information into a bad-request api error when a decode fails.\nfunc transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {\n\tobjGVKs, _, err := typer.ObjectKinds(into)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(err.Error())\n\t}\n\tobjGVK := objGVKs[0]\n\tif gvk != nil && len(gvk.Kind) > 0 {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\"%s in version %q cannot be handled as a %s: %v\", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))\n\t}\n\tsummary := summarizeData(body, 30)\n\treturn errors.NewBadRequest(fmt.Sprintf(\"the object provided is unrecognized (must be of type %s): %v (%s)\", objGVK.Kind, baseErr, summary))\n}\n\n// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request\n// plus the path and query generated by the provided linkFunc\nfunc setSelfLink(obj runtime.Object, requestInfo *request.RequestInfo, namer ScopeNamer) error {\n\t// TODO: SelfLink generation should return 
a full URL?\n\turi, err := namer.GenerateLink(requestInfo, obj)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn namer.SetSelfLink(obj, uri)\n}\n\nfunc hasUID(obj runtime.Object) (bool, error) {\n\tif obj == nil {\n\t\treturn false, nil\n\t}\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn false, errors.NewInternalError(err)\n\t}\n\tif len(accessor.GetUID()) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n// checkName checks the provided name against the request\nfunc checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {\n\tobjNamespace, objName, err := namer.ObjectName(obj)\n\tif err != nil {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s based on URL) was undeterminable: %v\", name, err))\n\t}\n\tif objName != name {\n\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\"the name of the object (%s) does not match the name on the URL (%s)\", objName, name))\n\t}\n\tif len(namespace) > 0 {\n\t\tif len(objNamespace) > 0 && objNamespace != namespace {\n\t\t\treturn errors.NewBadRequest(fmt.Sprintf(\n\t\t\t\t\"the namespace of the object (%s) does not match the namespace on the request (%s)\", objNamespace, namespace))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// dedupOwnerReferences dedups owner references over the entire entry.\n// NOTE: We don't know enough about the existing cases of owner references\n// sharing the same UID but different fields. Nor do we know what might break.\n// In the future we may just dedup/reject owner references with the same UID.\nfunc dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) {\n\tvar result []metav1.OwnerReference\n\tvar duplicates []string\n\tseen := make(map[types.UID]struct{})\n\tfor _, ref := range refs {\n\t\t_, ok := seen[ref.UID]\n\t\t// Short-circuit if we haven't seen the UID before. 
Otherwise\n\t\t// check the entire list we have so far.\n\t\tif !ok || !hasOwnerReference(result, ref) {\n\t\t\tseen[ref.UID] = struct{}{}\n\t\t\tresult = append(result, ref)\n\t\t} else {\n\t\t\tduplicates = append(duplicates, string(ref.UID))\n\t\t}\n\t}\n\treturn result, duplicates\n}\n\n// hasOwnerReference returns true if refs has an item equal to ref. The function\n// focuses on semantic equality instead of memory equality, to catch duplicates\n// with different pointer addresses. The function uses apiequality.Semantic\n// instead of implementing its own comparison, to tolerate API changes to\n// metav1.OwnerReference.\n// NOTE: This is expensive, but we accept it because we've made sure it only\n// happens to owner references containing duplicate UIDs, plus typically the\n// number of items in the list should be small.\nfunc hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool {\n\tfor _, r := range refs {\n\t\tif apiequality.Semantic.DeepEqual(r, ref) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata.\n// If duplicates are found, the function records a warning to the provided context.\nfunc dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\t// The object doesn't have metadata. 
Nothing we need to do here.\n\t\treturn\n\t}\n\trefs := accessor.GetOwnerReferences()\n\tdeduped, duplicates := dedupOwnerReferences(refs)\n\tif len(duplicates) > 0 {\n\t\t// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.\n\t\t// For PATCH request the API server only dedups after mutating admission.\n\t\tformat := DuplicateOwnerReferencesWarningFormat\n\t\tif afterMutatingAdmission {\n\t\t\tformat = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat\n\t\t}\n\t\twarning.AddWarning(requestContext, \"\", fmt.Sprintf(format,\n\t\t\tstrings.Join(duplicates, \", \")))\n\t\taccessor.SetOwnerReferences(deduped)\n\t}\n}\n\n// setObjectSelfLink sets the self link of an object as needed.\n// TODO: remove the need for the namer LinkSetters by requiring objects implement either Object or List\n// interfaces\nfunc setObjectSelfLink(ctx context.Context, obj runtime.Object, req *http.Request, namer ScopeNamer) error {\n\tif utilfeature.DefaultFeatureGate.Enabled(features.RemoveSelfLink) {\n\t\t// Ensure that for empty lists we don't return <nil> items.\n\t\tif meta.IsListType(obj) && meta.LenList(obj) == 0 {\n\t\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// We only generate list links on objects that implement ListInterface - historically we duck typed this\n\t// check via reflection, but as we move away from reflection we require that you not only carry Items but\n\t// ListMeta into order to be identified as a list.\n\tif !meta.IsListType(obj) {\n\t\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t\t}\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t}\n\n\turi, err := namer.GenerateListLink(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := namer.SetSelfLink(obj, uri); err != nil {\n\t\tklog.V(4).InfoS(\"Unable to set self link on object\", \"error\", 
err)\n\t}\n\trequestInfo, ok := request.RequestInfoFrom(ctx)\n\tif !ok {\n\t\treturn fmt.Errorf(\"missing requestInfo\")\n\t}\n\n\tcount := 0\n\terr = meta.EachListItem(obj, func(obj runtime.Object) error {\n\t\tcount++\n\t\treturn setSelfLink(obj, requestInfo, namer)\n\t})\n\n\tif count == 0 {\n\t\tif err := meta.SetList(obj, []runtime.Object{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc summarizeData(data []byte, maxLength int) string {\n\tswitch {\n\tcase len(data) == 0:\n\t\treturn \"<empty>\"\n\tcase data[0] == '{':\n\t\tif len(data) > maxLength {\n\t\t\treturn string(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn string(data)\n\tdefault:\n\t\tif len(data) > maxLength {\n\t\t\treturn hex.EncodeToString(data[:maxLength]) + \" ...\"\n\t\t}\n\t\treturn hex.EncodeToString(data)\n\t}\n}\n\nfunc limitedReadBody(req *http.Request, limit int64) ([]byte, error) {\n\tdefer req.Body.Close()\n\tif limit <= 0 {\n\t\treturn ioutil.ReadAll(req.Body)\n\t}\n\tlr := &io.LimitedReader{\n\t\tR: req.Body,\n\t\tN: limit + 1,\n\t}\n\tdata, err := ioutil.ReadAll(lr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lr.N <= 0 {\n\t\treturn nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf(\"limit is %d\", limit))\n\t}\n\treturn data, nil\n}\n\nfunc isDryRun(url *url.URL) bool {\n\treturn len(url.Query()[\"dryRun\"]) != 0\n}\n\ntype etcdError interface {\n\tCode() grpccodes.Code\n\tError() string\n}\n\ntype grpcError interface {\n\tGRPCStatus() *grpcstatus.Status\n}\n\nfunc isTooLargeError(err error) bool {\n\tif err != nil {\n\t\tif etcdErr, ok := err.(etcdError); ok {\n\t\t\tif etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == \"etcdserver: request is too large\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tif grpcErr, ok := err.(grpcError); ok {\n\t\t\tif grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), \"trying to send message larger than max\") {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "Now that we are familiar with what the `RequestScope` looks like, let's see how a resource is created in [create.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go).\n\n`CreateNamedResource` calls the `createHandler` function.",
"line": 207,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "Let's look at what `createHandler` does!",
"line": 51,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "First, the `name` and `namespace` for the resource are derived from the request.",
"line": 62,
"selection": {
"start": {
"line": 62,
"character": 1
},
"end": {
"line": 63,
"character": 1
}
},
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "Then the `NegotiatedSerializer` derives the media type (yaml, json, etc) from the request.",
"line": 82,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "The serializer is then used to create a decoder. The decoder is responsible for:\n- Serializing the body of the request to a golang type\n- Converting the golang object to the desired version\n- Defaulting the golang object with desired values",
"line": 95,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "`limtedReadBody` reads the request body using `ioutil.ReadAll(req.Body)`.",
"line": 97,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"selection": {
"start": {
"line": 110,
"character": 1
},
"end": {
"line": 115,
"character": 92
}
},
"description": "Before we start decoding the object, we need to ensure that the options passed for the `Create` method (`CreateOptions`) are valid.",
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"selection": {
"start": {
"line": 119,
"character": 1
},
"end": {
"line": 120,
"character": 63
}
},
"description": "Next, we finally decode the request body into a Go object.\n\nUnder the hood, this method also calls `scheme.Convert()` for conversion and `scheme.Default()` for defaulting.\n",
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"selection": {
"start": {
"line": 172,
"character": 1
},
"end": {
"line": 176,
"character": 5
}
},
"description": "After deserialization, conversion and defaulting, mutating admission webhooks are applied.",
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"description": "After mutating webhooks, validating webhooks are applied.\n",
"line": 179,
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"file": "pkg/endpoints/handlers/create.go",
"selection": {
"start": {
"line": 152,
"character": 2
},
"end": {
"line": 160,
"character": 4
}
},
      "description": "The `requestFunc` closure calls `r.Create()`, passing it a validation function built by `rest.AdmissionToValidateObjectFunc()`. The store invokes this function to apply validating webhooks just before the object is persisted.",
"contents": "/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetainternalversionscheme \"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/validation\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/apiserver/pkg/admission\"\n\t\"k8s.io/apiserver/pkg/audit\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/finisher\"\n\t\"k8s.io/apiserver/pkg/endpoints/handlers/negotiation\"\n\t\"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\nvar namespaceGVK = schema.GroupVersionKind{Group: \"\", Version: \"v1\", Kind: \"Namespace\"}\n\nfunc createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t// For performance tracking purposes.\n\t\ttrace := utiltrace.New(\"Create\", 
traceFields(req)...)\n\t\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\t\tif isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) {\n\t\t\tscope.err(errors.NewBadRequest(\"the dryRun feature is disabled\"), w, req)\n\t\t\treturn\n\t\t}\n\n\t\tnamespace, name, err := scope.Namer.Name(req)\n\t\tif err != nil {\n\t\t\tif includeName {\n\t\t\t\t// name was required, return\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// otherwise attempt to look up the namespace\n\t\t\tnamespace, err = scope.Namer.Namespace(req)\n\t\t\tif err != nil {\n\t\t\t\tscope.err(err, w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided\n\t\t// timeout inside the parent context is lower than requestTimeoutUpperBound.\n\t\tctx, cancel := context.WithTimeout(req.Context(), requestTimeoutUpperBound)\n\t\tdefer cancel()\n\t\toutputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tgv := scope.Kind.GroupVersion()\n\t\ts, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tdecoder := scope.Serializer.DecoderToVersion(s.Serializer, scope.HubGroupVersion)\n\n\t\tbody, err := limitedReadBody(req, scope.MaxRequestBodyBytes)\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\toptions := &metav1.CreateOptions{}\n\t\tvalues := req.URL.Query()\n\t\tif err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {\n\t\t\terr = errors.NewBadRequest(err.Error())\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\tif errs := validation.ValidateCreateOptions(options); len(errs) > 0 {\n\t\t\terr := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: 
\"CreateOptions\"}, \"\", errs)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\toptions.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\n\t\tdefaultGVK := scope.Kind\n\t\toriginal := r.New()\n\t\ttrace.Step(\"About to convert to expected version\")\n\t\tobj, gvk, err := decoder.Decode(body, &defaultGVK, original)\n\t\tif err != nil {\n\t\t\terr = transformDecodeError(scope.Typer, err, original, gvk, body)\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\n\t\tobjGV := gvk.GroupVersion()\n\t\tif !scope.AcceptsGroupVersion(objGV) {\n\t\t\terr = errors.NewBadRequest(fmt.Sprintf(\"the API version in the data (%s) does not match the expected API version (%v)\", objGV.String(), gv.String()))\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Conversion done\")\n\n\t\t// On create, get name from new object if unset\n\t\tif len(name) == 0 {\n\t\t\t_, name, _ = scope.Namer.ObjectName(obj)\n\t\t}\n\t\tif len(namespace) == 0 && *gvk == namespaceGVK {\n\t\t\tnamespace = name\n\t\t}\n\t\tctx = request.WithNamespace(ctx, namespace)\n\n\t\tae := request.AuditEventFrom(ctx)\n\t\tadmit = admission.WithAudit(admit, ae)\n\t\taudit.LogRequestObject(ae, obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)\n\n\t\tuserInfo, _ := request.UserFrom(ctx)\n\n\t\ttrace.Step(\"About to store object in database\")\n\t\tadmissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)\n\t\trequestFunc := func() (runtime.Object, error) {\n\t\t\treturn r.Create(\n\t\t\t\tctx,\n\t\t\t\tname,\n\t\t\t\tobj,\n\t\t\t\trest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),\n\t\t\t\toptions,\n\t\t\t)\n\t\t}\n\t\t// Dedup owner references before updating managed fields\n\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), false)\n\t\tresult, err := finisher.FinishRequest(ctx, 
func() (runtime.Object, error) {\n\t\t\tif scope.FieldManager != nil {\n\t\t\t\tliveObj, err := scope.Creater.New(scope.Kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to create new object (Create for %v): %v\", scope.Kind, err)\n\t\t\t\t}\n\t\t\t\tobj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))\n\t\t\t\tadmit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)\n\t\t\t}\n\t\t\tif mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {\n\t\t\t\tif err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dedup owner references again after mutating admission happens\n\t\t\tdedupOwnerReferencesAndAddWarning(obj, req.Context(), true)\n\t\t\tresult, err := requestFunc()\n\t\t\t// If the object wasn't committed to storage because it's serialized size was too large,\n\t\t\t// it is safe to remove managedFields (which can be large) and try again.\n\t\t\tif isTooLargeError(err) {\n\t\t\t\tif accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {\n\t\t\t\t\taccessor.SetManagedFields(nil)\n\t\t\t\t\tresult, err = requestFunc()\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result, err\n\t\t})\n\t\tif err != nil {\n\t\t\tscope.err(err, w, req)\n\t\t\treturn\n\t\t}\n\t\ttrace.Step(\"Object stored in database\")\n\n\t\tcode := http.StatusCreated\n\t\tstatus, ok := result.(*metav1.Status)\n\t\tif ok && err == nil && status.Code == 0 {\n\t\t\tstatus.Code = int32(code)\n\t\t}\n\n\t\ttransformResponseObject(ctx, scope, trace, req, w, code, outputMediaType, result)\n\t}\n}\n\n// CreateNamedResource returns a function that will handle a resource creation with name.\nfunc CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(r, scope, admission, true)\n}\n\n// 
CreateResource returns a function that will handle a resource creation.\nfunc CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {\n\treturn createHandler(&namedCreaterAdapter{r}, scope, admission, false)\n}\n\ntype namedCreaterAdapter struct {\n\trest.Creater\n}\n\nfunc (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\treturn c.Creater.Create(ctx, obj, createValidatingAdmission, options)\n}\n\n// manager is assumed to be already a valid value, we need to make\n// userAgent into a valid value too.\nfunc managerOrUserAgent(manager, userAgent string) string {\n\tif manager != \"\" {\n\t\treturn manager\n\t}\n\treturn prefixFromUserAgent(userAgent)\n}\n\n// prefixFromUserAgent takes the characters preceding the first /, quote\n// unprintable character and then trim what's beyond the\n// FieldManagerMaxLength limit.\nfunc prefixFromUserAgent(u string) string {\n\tm := strings.Split(u, \"/\")[0]\n\tbuf := bytes.NewBuffer(nil)\n\tfor _, r := range m {\n\t\t// Ignore non-printable characters\n\t\tif !unicode.IsPrint(r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Only append if we have room for it\n\t\tif buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {\n\t\t\tbreak\n\t\t}\n\t\tbuf.WriteRune(r)\n\t}\n\treturn buf.String()\n}\n"
},
{
"directory": "pkg/registry/generic/registry",
"description": ""
},
{
"file": "pkg/registry/generic/registry/store.go",
"description": "For example, let's look at the `Create` method in the generic registry of the apiserver.",
"line": 362,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\t\"k8s.io/apimachinery/pkg/api/validation/path\"\n\tmetainternalversion \"k8s.io/apimachinery/pkg/apis/meta/internalversion\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/sets\"\n\t\"k8s.io/apimachinery/pkg/util/validation/field\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\tgenericapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/registry/generic\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\tstoreerr \"k8s.io/apiserver/pkg/storage/errors\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"sigs.k8s.io/structured-merge-diff/v4/fieldpath\"\n\n\t\"k8s.io/klog/v2\"\n)\n\n// FinishFunc is a function returned by Begin hooks to complete an operation.\ntype FinishFunc func(ctx context.Context, success bool)\n\n// AfterDeleteFunc is the type used for the 
Store.AfterDelete hook.\ntype AfterDeleteFunc func(obj runtime.Object, options *metav1.DeleteOptions)\n\n// BeginCreateFunc is the type used for the Store.BeginCreate hook.\ntype BeginCreateFunc func(ctx context.Context, obj runtime.Object, options *metav1.CreateOptions) (FinishFunc, error)\n\n// AfterCreateFunc is the type used for the Store.AfterCreate hook.\ntype AfterCreateFunc func(obj runtime.Object, options *metav1.CreateOptions)\n\n// BeginUpdateFunc is the type used for the Store.BeginUpdate hook.\ntype BeginUpdateFunc func(ctx context.Context, obj, old runtime.Object, options *metav1.UpdateOptions) (FinishFunc, error)\n\n// AfterUpdateFunc is the type used for the Store.AfterUpdate hook.\ntype AfterUpdateFunc func(obj runtime.Object, options *metav1.UpdateOptions)\n\n// GenericStore interface can be used for type assertions when we need to access the underlying strategies.\ntype GenericStore interface {\n\tGetCreateStrategy() rest.RESTCreateStrategy\n\tGetUpdateStrategy() rest.RESTUpdateStrategy\n\tGetDeleteStrategy() rest.RESTDeleteStrategy\n}\n\n// Store implements k8s.io/apiserver/pkg/registry/rest.StandardStorage. It's\n// intended to be embeddable and allows the consumer to implement any\n// non-generic functions that are required. This object is intended to be\n// copyable so that it can be used in different ways but share the same\n// underlying behavior.\n//\n// All fields are required unless specified.\n//\n// The intended use of this type is embedding within a Kind specific\n// RESTStorage implementation. This type provides CRUD semantics on a Kubelike\n// resource, handling details like conflict detection with ResourceVersion and\n// semantics. 
The RESTCreateStrategy, RESTUpdateStrategy, and\n// RESTDeleteStrategy are generic across all backends, and encapsulate logic\n// specific to the API.\n//\n// TODO: make the default exposed methods exactly match a generic RESTStorage\ntype Store struct {\n\t// NewFunc returns a new instance of the type this registry returns for a\n\t// GET of a single object, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object\n\tNewFunc func() runtime.Object\n\n\t// NewListFunc returns a new list of the type this registry; it is the\n\t// type returned when the resource is listed, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource\n\tNewListFunc func() runtime.Object\n\n\t// DefaultQualifiedResource is the pluralized name of the resource.\n\t// This field is used if there is no request info present in the context.\n\t// See qualifiedResourceFromContext for details.\n\tDefaultQualifiedResource schema.GroupResource\n\n\t// KeyRootFunc returns the root etcd key for this resource; should not\n\t// include trailing \"/\". This is used for operations that work on the\n\t// entire collection (listing and watching).\n\t//\n\t// KeyRootFunc and KeyFunc must be supplied together or not at all.\n\tKeyRootFunc func(ctx context.Context) string\n\n\t// KeyFunc returns the key for a specific object in the collection.\n\t// KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace'\n\t// can be gotten from ctx.\n\t//\n\t// KeyFunc and KeyRootFunc must be supplied together or not at all.\n\tKeyFunc func(ctx context.Context, name string) (string, error)\n\n\t// ObjectNameFunc returns the name of an object or an error.\n\tObjectNameFunc func(obj runtime.Object) (string, error)\n\n\t// TTLFunc returns the TTL (time to live) that objects should be persisted\n\t// with. The existing parameter is the current TTL or the default for this\n\t// operation. 
The update parameter indicates whether this is an operation\n\t// against an existing object.\n\t//\n\t// Objects that are persisted with a TTL are evicted once the TTL expires.\n\tTTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)\n\n\t// PredicateFunc returns a matcher corresponding to the provided labels\n\t// and fields. The SelectionPredicate returned should return true if the\n\t// object matches the given field and label selectors.\n\tPredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate\n\n\t// EnableGarbageCollection affects the handling of Update and Delete\n\t// requests. Enabling garbage collection allows finalizers to do work to\n\t// finalize this object before the store deletes it.\n\t//\n\t// If any store has garbage collection enabled, it must also be enabled in\n\t// the kube-controller-manager.\n\tEnableGarbageCollection bool\n\n\t// DeleteCollectionWorkers is the maximum number of workers in a single\n\t// DeleteCollection call. Delete requests for the items in a collection\n\t// are issued in parallel.\n\tDeleteCollectionWorkers int\n\n\t// Decorator is an optional exit hook on an object returned from the\n\t// underlying storage. The returned object could be an individual object\n\t// (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for\n\t// integrations that are above storage and should only be used for\n\t// specific cases where storage of the value is not appropriate, since\n\t// they cannot be watched.\n\tDecorator func(runtime.Object)\n\n\t// CreateStrategy implements resource-specific behavior during creation.\n\tCreateStrategy rest.RESTCreateStrategy\n\t// BeginCreate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterCreate and Decorator, indicating via the argument\n\t// whether the operation succeeded. 
If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginCreate BeginCreateFunc\n\t// AfterCreate implements a further operation to run after a resource is\n\t// created and before it is decorated, optional.\n\tAfterCreate AfterCreateFunc\n\n\t// UpdateStrategy implements resource-specific behavior during updates.\n\tUpdateStrategy rest.RESTUpdateStrategy\n\t// BeginUpdate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterUpdate and Decorator, indicating via the argument\n\t// whether the operation succeeded. If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginUpdate BeginUpdateFunc\n\t// AfterUpdate implements a further operation to run after a resource is\n\t// updated and before it is decorated, optional.\n\tAfterUpdate AfterUpdateFunc\n\n\t// DeleteStrategy implements resource-specific behavior during deletion.\n\tDeleteStrategy rest.RESTDeleteStrategy\n\t// AfterDelete implements a further operation to run after a resource is\n\t// deleted and before it is decorated, optional.\n\tAfterDelete AfterDeleteFunc\n\t// ReturnDeletedObject determines whether the Store returns the object\n\t// that was deleted. Otherwise, return a generic success status response.\n\tReturnDeletedObject bool\n\t// ShouldDeleteDuringUpdate is an optional function to determine whether\n\t// an update from existing to obj should result in a delete.\n\t// If specified, this is checked in addition to standard finalizer,\n\t// deletionTimestamp, and deletionGracePeriodSeconds checks.\n\tShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool\n\n\t// TableConvertor is an optional interface for transforming items or lists\n\t// of items into tabular output. 
If unset, the default will be used.\n\tTableConvertor rest.TableConvertor\n\n\t// ResetFieldsStrategy provides the fields reset by the strategy that\n\t// should not be modified by the user.\n\tResetFieldsStrategy rest.ResetFieldsStrategy\n\n\t// Storage is the interface for the underlying storage for the\n\t// resource. It is wrapped into a \"DryRunnableStorage\" that will\n\t// either pass-through or simply dry-run.\n\tStorage DryRunnableStorage\n\t// StorageVersioner outputs the <group/version/kind> an object will be\n\t// converted to before persisted in etcd, given a list of possible\n\t// kinds of the object.\n\t// If the StorageVersioner is nil, apiserver will leave the\n\t// storageVersionHash as empty in the discovery document.\n\tStorageVersioner runtime.GroupVersioner\n\t// Called to cleanup clients used by the underlying Storage; optional.\n\tDestroyFunc func()\n}\n\n// Note: the rest.StandardStorage interface aggregates the common REST verbs\nvar _ rest.StandardStorage = &Store{}\nvar _ rest.TableConvertor = &Store{}\nvar _ GenericStore = &Store{}\n\nconst (\n\tOptimisticLockErrorMsg = \"the object has been modified; please apply your changes to the latest version and try again\"\n\tresourceCountPollPeriodJitter = 1.2\n)\n\n// NamespaceKeyRootFunc is the default function for constructing storage paths\n// to resource directories enforcing namespace rules.\nfunc NamespaceKeyRootFunc(ctx context.Context, prefix string) string {\n\tkey := prefix\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif ok && len(ns) > 0 {\n\t\tkey = key + \"/\" + ns\n\t}\n\treturn key\n}\n\n// NamespaceKeyFunc is the default function for constructing storage paths to\n// a resource relative to the given prefix enforcing namespace rules. 
If the\n// context does not contain a namespace, it errors.\nfunc NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tkey := NamespaceKeyRootFunc(ctx, prefix)\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif !ok || len(ns) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Namespace parameter required.\")\n\t}\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey = key + \"/\" + name\n\treturn key, nil\n}\n\n// NoNamespaceKeyFunc is the default function for constructing storage paths\n// to a resource relative to the given prefix without a namespace.\nfunc NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey := prefix + \"/\" + name\n\treturn key, nil\n}\n\n// New implements RESTStorage.New.\nfunc (e *Store) New() runtime.Object {\n\treturn e.NewFunc()\n}\n\n// NewList implements rest.Lister.\nfunc (e *Store) NewList() runtime.Object {\n\treturn e.NewListFunc()\n}\n\n// NamespaceScoped indicates whether the resource is namespaced\nfunc (e *Store) NamespaceScoped() bool {\n\tif e.CreateStrategy != nil {\n\t\treturn e.CreateStrategy.NamespaceScoped()\n\t}\n\tif e.UpdateStrategy != nil {\n\t\treturn e.UpdateStrategy.NamespaceScoped()\n\t}\n\n\tpanic(\"programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too\")\n}\n\n// GetCreateStrategy implements GenericStore.\nfunc (e *Store) GetCreateStrategy() rest.RESTCreateStrategy 
{\n\treturn e.CreateStrategy\n}\n\n// GetUpdateStrategy implements GenericStore.\nfunc (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy {\n\treturn e.UpdateStrategy\n}\n\n// GetDeleteStrategy implements GenericStore.\nfunc (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy {\n\treturn e.DeleteStrategy\n}\n\n// List returns a list of items matching labels and field according to the\n// store's PredicateFunc.\nfunc (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tout, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ListPredicate returns a list of all the items matching the given\n// SelectionPredicate.\nfunc (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif options == nil {\n\t\t// By default we should serve the request from etcd.\n\t\toptions = &metainternalversion.ListOptions{ResourceVersion: \"\"}\n\t}\n\tp.Limit = options.Limit\n\tp.Continue = options.Continue\n\tlist := e.NewListFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstorageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\terr := e.Storage.GetToList(ctx, key, storageOpts, list)\n\t\t\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the optimization 
is skipped\n\t}\n\n\terr := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list)\n\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n}\n\n// finishNothing is a do-nothing FinishFunc.\nfunc finishNothing(context.Context, bool) {}\n\n// Create inserts a new item according to the unique key from the object.\nfunc (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\tvar finishCreate FinishFunc = finishNothing\n\n\tif e.BeginCreate != nil {\n\t\tfn, err := e.BeginCreate(ctx, obj, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfinishCreate = fn\n\t\tdefer func() {\n\t\t\tfinishCreate(ctx, false)\n\t\t}()\n\t}\n\n\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\treturn nil, err\n\t}\n\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t// handling chain wants to enforce.\n\tif createValidation != nil {\n\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tname, err := e.ObjectNameFunc(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tttl, err := e.calculateTTL(obj, 0, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := e.NewFunc()\n\tif err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {\n\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)\n\t\tif !apierrors.IsAlreadyExists(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tif errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taccessor, errGetAcc := meta.Accessor(out)\n\t\tif errGetAcc != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\tif accessor.GetDeletionTimestamp() != nil {\n\t\t\tmsg := &err.(*apierrors.StatusError).ErrStatus.Message\n\t\t\t*msg = fmt.Sprintf(\"object is being deleted: %s\", *msg)\n\t\t}\n\t\treturn nil, err\n\t}\n\t// The operation has succeeded. Call the finish function if there is one,\n\t// and then make sure the defer doesn't call it again.\n\tfn := finishCreate\n\tfinishCreate = finishNothing\n\tfn(ctx, true)\n\n\tif e.AfterCreate != nil {\n\t\te.AfterCreate(out, options)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ShouldDeleteDuringUpdate is the default function for\n// checking if an object should be deleted during an update.\n// It checks if the new object has no finalizers,\n// the existing object's deletionTimestamp is set, and\n// the existing object's deletionGracePeriodSeconds is 0 or nil\nfunc ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool {\n\tnewMeta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\toldMeta, err := meta.Accessor(existing)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\tif len(newMeta.GetFinalizers()) > 0 {\n\t\t// don't delete with finalizers remaining in the new object\n\t\treturn false\n\t}\n\tif oldMeta.GetDeletionTimestamp() == nil {\n\t\t// don't delete if the existing object hasn't had a delete request made\n\t\treturn false\n\t}\n\t// delete if the existing object has no grace period or a grace period of 0\n\treturn oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0\n}\n\n// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list.\n// Used for objects that are either been finalized or have never initialized.\nfunc (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, options *metav1.DeleteOptions) 
(runtime.Object, bool, error) {\n\tout := e.NewFunc()\n\tklog.V(6).Infof(\"going to delete %s from registry, triggered by update\", name)\n\t// Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb.\n\tif err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Deletion is racy, i.e., there could be multiple update\n\t\t// requests to remove all finalizers from the object, so we\n\t\t// ignore the NotFound error.\n\t\tif storage.IsNotFound(err) {\n\t\t\t_, err := e.finalizeDelete(ctx, obj, true, options)\n\t\t\t// clients are expecting an updated object if a PUT succeeded,\n\t\t\t// but finalizeDelete returns a metav1.Status, so return\n\t\t\t// the object in the request instead.\n\t\t\treturn obj, false, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\t_, err := e.finalizeDelete(ctx, out, true, options)\n\t// clients are expecting an updated object if a PUT succeeded, but\n\t// finalizeDelete returns a metav1.Status, so return the object in\n\t// the request instead.\n\treturn obj, false, err\n}\n\n// Update performs an atomic update and set of the object. Returns the result of the update\n// or an error. 
If the registry allows create-on-update, the create flow will be executed.\n// A bool is returned along with the object and any errors, to indicate object creation.\nfunc (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar (\n\t\tcreatingObj runtime.Object\n\t\tcreating = false\n\t)\n\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstoragePreconditions := &storage.Preconditions{}\n\tif preconditions := objInfo.Preconditions(); preconditions != nil {\n\t\tstoragePreconditions.UID = preconditions.UID\n\t\tstoragePreconditions.ResourceVersion = preconditions.ResourceVersion\n\t}\n\n\tout := e.NewFunc()\n\t// deleteObj is only used in case a deletion is carried out\n\tvar deleteObj runtime.Object\n\terr = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {\n\t\texistingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif existingResourceVersion == 0 {\n\t\t\tif !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate {\n\t\t\t\treturn nil, nil, apierrors.NewNotFound(qualifiedResource, name)\n\t\t\t}\n\t\t}\n\n\t\t// Given the existing object, get the new object\n\t\tobj, err := objInfo.UpdatedObject(ctx, existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// If AllowUnconditionalUpdate() is true and the object specified by\n\t\t// the user does not have a resource version, then we populate it with\n\t\t// the latest version. 
Else, we check that the version specified by\n\t\t// the user matches the version of latest storage object.\n\t\tnewResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tdoUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()\n\n\t\tif existingResourceVersion == 0 {\n\t\t\tvar finishCreate FinishFunc = finishNothing\n\n\t\t\tif e.BeginCreate != nil {\n\t\t\t\tfn, err := e.BeginCreate(ctx, obj, newCreateOptionsFromUpdateOptions(options))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t\tfinishCreate = fn\n\t\t\t\tdefer func() {\n\t\t\t\t\tfinishCreate(ctx, false)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tcreating = true\n\t\t\tcreatingObj = obj\n\t\t\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t\t\t// handling chain wants to enforce.\n\t\t\tif createValidation != nil {\n\t\t\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tttl, err := e.calculateTTL(obj, 0, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\t// The operation has succeeded. 
Call the finish function if there is one,\n\t\t\t// and then make sure the defer doesn't call it again.\n\t\t\tfn := finishCreate\n\t\t\tfinishCreate = finishNothing\n\t\t\tfn(ctx, true)\n\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\n\t\tcreating = false\n\t\tcreatingObj = nil\n\t\tif doUnconditionalUpdate {\n\t\t\t// Update the object's resource version to match the latest\n\t\t\t// storage object's resource version.\n\t\t\terr = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// Check if the object's resource version matches the latest\n\t\t\t// resource version.\n\t\t\tif newResourceVersion == 0 {\n\t\t\t\t// TODO: The Invalid error should have a field for Resource.\n\t\t\t\t// After that field is added, we should fill the Resource and\n\t\t\t\t// leave the Kind field empty. See the discussion in #18526.\n\t\t\t\tqualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}\n\t\t\t\tfieldErrList := field.ErrorList{field.Invalid(field.NewPath(\"metadata\").Child(\"resourceVersion\"), newResourceVersion, \"must be specified for an update\")}\n\t\t\t\treturn nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList)\n\t\t\t}\n\t\t\tif newResourceVersion != existingResourceVersion {\n\t\t\t\treturn nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))\n\t\t\t}\n\t\t}\n\n\t\tvar finishUpdate FinishFunc = finishNothing\n\n\t\tif e.BeginUpdate != nil {\n\t\t\tfn, err := e.BeginUpdate(ctx, obj, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tfinishUpdate = fn\n\t\t\tdefer func() {\n\t\t\t\tfinishUpdate(ctx, false)\n\t\t\t}()\n\t\t}\n\n\t\tif err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t// at this point we have a fully formed object. 
It is time to call the validators that the apiserver\n\t\t// handling chain wants to enforce.\n\t\tif updateValidation != nil {\n\t\t\tif err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\t// Check the default delete-during-update conditions, and store-specific conditions if provided\n\t\tif ShouldDeleteDuringUpdate(ctx, key, obj, existing) &&\n\t\t\t(e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) {\n\t\t\tdeleteObj = obj\n\t\t\treturn nil, nil, errEmptiedFinalizers\n\t\t}\n\t\tttl, err := e.calculateTTL(obj, res.TTL, true)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// The operation has succeeded. Call the finish function if there is one,\n\t\t// and then make sure the defer doesn't call it again.\n\t\tfn := finishUpdate\n\t\tfinishUpdate = finishNothing\n\t\tfn(ctx, true)\n\n\t\tif int64(ttl) != res.TTL {\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\t\treturn obj, nil, nil\n\t}, dryrun.IsDryRun(options.DryRun), nil)\n\n\tif err != nil {\n\t\t// delete the object\n\t\tif err == errEmptiedFinalizers {\n\t\t\treturn e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, newDeleteOptionsFromUpdateOptions(options))\n\t\t}\n\t\tif creating {\n\t\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)\n\t\t} else {\n\t\t\terr = storeerr.InterpretUpdateError(err, qualifiedResource, name)\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\tif creating {\n\t\tif e.AfterCreate != nil {\n\t\t\te.AfterCreate(out, newCreateOptionsFromUpdateOptions(options))\n\t\t}\n\t} else {\n\t\tif e.AfterUpdate != nil {\n\t\t\te.AfterUpdate(out, options)\n\t\t}\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, creating, nil\n}\n\n// This is a helper to convert UpdateOptions to CreateOptions for the\n// create-on-update 
path.\nfunc newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions {\n\tco := &metav1.CreateOptions{\n\t\tDryRun: in.DryRun,\n\t\tFieldManager: in.FieldManager,\n\t}\n\tco.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\treturn co\n}\n\n// This is a helper to convert UpdateOptions to DeleteOptions for the\n// delete-on-update path.\nfunc newDeleteOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.DeleteOptions {\n\tdo := &metav1.DeleteOptions{\n\t\tDryRun: in.DryRun,\n\t}\n\tdo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"DeleteOptions\"))\n\treturn do\n}\n\n// Get retrieves the item from storage.\nfunc (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {\n\tobj := e.NewFunc()\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil {\n\t\treturn nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(obj)\n\t}\n\treturn obj, nil\n}\n\n// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info.\n// If the context has no request info, DefaultQualifiedResource is used.\nfunc (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource {\n\tif info, ok := genericapirequest.RequestInfoFrom(ctx); ok {\n\t\treturn schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}\n\t}\n\t// some implementations access storage directly and thus the context has no RequestInfo\n\treturn e.DefaultQualifiedResource\n}\n\nvar (\n\terrAlreadyDeleting = fmt.Errorf(\"abort delete\")\n\terrDeleteNow = fmt.Errorf(\"delete now\")\n\terrEmptiedFinalizers = fmt.Errorf(\"emptied finalizers\")\n)\n\n// shouldOrphanDependents returns true if the finalizer for orphaning 
should be set\n// updated for FinalizerOrphanDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerOrphanDependents: options, existing finalizers of the object,\n// and e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tgcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)\n\tvar defaultGCPolicy rest.GarbageCollectionPolicy\n\tif ok {\n\t\tdefaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx)\n\t}\n\n\tif defaultGCPolicy == rest.Unsupported {\n\t\t// return false to indicate that we should NOT orphan\n\t\treturn false\n\t}\n\n\t// An explicit policy was set at deletion time, that overrides everything\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\t//lint:ignore SA1019 backwards compatibility\n\t\treturn *options.OrphanDependents\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationOrphan:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation should make sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Get default orphan policy from this REST object type if it exists\n\treturn defaultGCPolicy == rest.OrphanDependents\n}\n\n// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set\n// updated for 
FinalizerDeleteDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerDeleteDependents: options, existing finalizers of the object, and\n// e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tif gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported {\n\t\t// return false to indicate that we should NOT delete in foreground\n\t\treturn false\n\t}\n\n\t// If an explicit policy was set at deletion time, that overrides both\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\treturn false\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationForeground:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation has made sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn false\n}\n\n// deletionFinalizersForGarbageCollection analyzes the object and delete options\n// to determine whether the object is in need of finalization by the garbage\n// collector. 
If so, returns the set of deletion finalizers to apply and a bool\n// indicating whether the finalizer list has changed and is in need of updating.\n//\n// The finalizers returned are intended to be handled by the garbage collector.\n// If garbage collection is disabled for the store, this function returns false\n// to ensure finalizers aren't set which will never be cleared.\nfunc deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {\n\tif !e.EnableGarbageCollection {\n\t\treturn false, []string{}\n\t}\n\tshouldOrphan := shouldOrphanDependents(ctx, e, accessor, options)\n\tshouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options)\n\tnewFinalizers := []string{}\n\n\t// first remove both finalizers, add them back if needed.\n\tfor _, f := range accessor.GetFinalizers() {\n\t\tif f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents {\n\t\t\tcontinue\n\t\t}\n\t\tnewFinalizers = append(newFinalizers, f)\n\t}\n\n\tif shouldOrphan {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents)\n\t}\n\tif shouldDeleteDependentInForeground {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents)\n\t}\n\n\toldFinalizerSet := sets.NewString(accessor.GetFinalizers()...)\n\tnewFinalizersSet := sets.NewString(newFinalizers...)\n\tif oldFinalizerSet.Equal(newFinalizersSet) {\n\t\treturn false, accessor.GetFinalizers()\n\t}\n\treturn true, newFinalizers\n}\n\n// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the\n// DeletionTimestamp to \"now\" if there is no existing deletionTimestamp or if the existing\n// deletionTimestamp is further in future. 
Finalizers are watching for such updates and will\n// finalize the object if their IDs are present in the object's Finalizers list.\nfunc markAsDeleting(obj runtime.Object, now time.Time) (err error) {\n\tobjectMeta, kerr := meta.Accessor(obj)\n\tif kerr != nil {\n\t\treturn kerr\n\t}\n\t// This handles Generation bump for resources that don't support graceful\n\t// deletion. For resources that support graceful deletion is handle in\n\t// pkg/api/rest/delete.go\n\tif objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {\n\t\tobjectMeta.SetGeneration(objectMeta.GetGeneration() + 1)\n\t}\n\texistingDeletionTimestamp := objectMeta.GetDeletionTimestamp()\n\tif existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) {\n\t\tmetaNow := metav1.NewTime(now)\n\t\tobjectMeta.SetDeletionTimestamp(&metaNow)\n\t}\n\tvar zero int64 = 0\n\tobjectMeta.SetDeletionGracePeriodSeconds(&zero)\n\treturn nil\n}\n\n// updateForGracefulDeletionAndFinalizers updates the given object for\n// graceful deletion and finalization by setting the deletion timestamp and\n// grace period seconds (graceful deletion) and updating the list of\n// finalizers (finalization); it returns:\n//\n// 1. an error\n// 2. a boolean indicating that the object was not found, but it should be\n// ignored\n// 3. a boolean indicating that the object's grace period is exhausted and it\n// should be deleted immediately\n// 4. a new output object with the state that was updated\n// 5. 
a copy of the last existing state of the object\nfunc (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {\n\tlastGraceful := int64(0)\n\tvar pendingFinalizers bool\n\tout = e.NewFunc()\n\terr = e.Storage.GuaranteedUpdate(\n\t\tctx,\n\t\tkey,\n\t\tout,\n\t\tfalse, /* ignoreNotFound */\n\t\t&preconditions,\n\t\tstorage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {\n\t\t\tif err := deleteValidation(ctx, existing); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif pendingGraceful {\n\t\t\t\treturn nil, errAlreadyDeleting\n\t\t\t}\n\n\t\t\t// Add/remove the orphan finalizer as the options dictates.\n\t\t\t// Note that this occurs after checking pendingGraceufl, so\n\t\t\t// finalizers cannot be updated via DeleteOptions if deletion has\n\t\t\t// started.\n\t\t\texistingAccessor, err := meta.Accessor(existing)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tneedsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options)\n\t\t\tif needsUpdate {\n\t\t\t\texistingAccessor.SetFinalizers(newFinalizers)\n\t\t\t}\n\n\t\t\tpendingFinalizers = len(existingAccessor.GetFinalizers()) != 0\n\t\t\tif !graceful {\n\t\t\t\t// set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion\n\t\t\t\tif pendingFinalizers {\n\t\t\t\t\tklog.V(6).Infof(\"update the DeletionTimestamp to \\\"now\\\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers\", name)\n\t\t\t\t\terr = markAsDeleting(existing, time.Now())\n\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn existing, nil\n\t\t\t\t}\n\t\t\t\treturn nil, errDeleteNow\n\t\t\t}\n\t\t\tlastGraceful = *options.GracePeriodSeconds\n\t\t\tlastExisting = existing\n\t\t\treturn existing, nil\n\t\t}),\n\t\tdryrun.IsDryRun(options.DryRun),\n\t\tnil,\n\t)\n\tswitch err {\n\tcase nil:\n\t\t// If there are pending finalizers, we never delete the object immediately.\n\t\tif pendingFinalizers {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\tif lastGraceful > 0 {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\t// If we are here, the registry supports grace period mechanism and\n\t\t// we are intentionally delete gracelessly. In this case, we may\n\t\t// enter a race with other k8s components. If other component wins\n\t\t// the race, the object will not be found, and we should tolerate\n\t\t// the NotFound error. See\n\t\t// https://github.com/kubernetes/kubernetes/issues/19403 for\n\t\t// details.\n\t\treturn nil, true, true, out, lastExisting\n\tcase errDeleteNow:\n\t\t// we've updated the object to have a zero grace period, or it's already at 0, so\n\t\t// we should fall through and truly delete the object.\n\t\treturn nil, false, true, out, lastExisting\n\tcase errAlreadyDeleting:\n\t\tout, err = e.finalizeDelete(ctx, in, true, options)\n\t\treturn err, false, false, out, lastExisting\n\tdefault:\n\t\treturn storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting\n\t}\n}\n\n// Delete removes the item from storage.\nfunc (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tobj := e.NewFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tif err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil {\n\t\treturn nil, 
false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\n\t// support older consumers of delete by treating \"nil\" as delete immediately\n\tif options == nil {\n\t\toptions = metav1.NewDeleteOptions(0)\n\t}\n\tvar preconditions storage.Preconditions\n\tif options.Preconditions != nil {\n\t\tpreconditions.UID = options.Preconditions.UID\n\t\tpreconditions.ResourceVersion = options.Preconditions.ResourceVersion\n\t}\n\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\t// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending\n\tif pendingGraceful {\n\t\tout, err := e.finalizeDelete(ctx, obj, false, options)\n\t\treturn out, false, err\n\t}\n\t// check if obj has pending finalizers\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, false, apierrors.NewInternalError(err)\n\t}\n\tpendingFinalizers := len(accessor.GetFinalizers()) != 0\n\tvar ignoreNotFound bool\n\tvar deleteImmediately bool = true\n\tvar lastExisting, out runtime.Object\n\n\t// Handle combinations of graceful deletion and finalization by issuing\n\t// the correct updates.\n\tshouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options)\n\t// TODO: remove the check, because we support no-op updates now.\n\tif graceful || pendingFinalizers || shouldUpdateFinalizers {\n\t\terr, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj)\n\t\t// Update the preconditions.ResourceVersion if set since we updated the object.\n\t\tif err == nil && deleteImmediately && preconditions.ResourceVersion != nil {\n\t\t\taccessor, err = meta.Accessor(out)\n\t\t\tif err != nil {\n\t\t\t\treturn out, false, apierrors.NewInternalError(err)\n\t\t\t}\n\t\t\tresourceVersion := 
accessor.GetResourceVersion()\n\t\t\tpreconditions.ResourceVersion = &resourceVersion\n\t\t}\n\t}\n\n\t// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.\n\tif !deleteImmediately || err != nil {\n\t\treturn out, false, err\n\t}\n\n\t// Going further in this function is not useful when we are\n\t// performing a dry-run request. Worse, it will actually\n\t// override \"out\" with the version of the object in database\n\t// that doesn't have the finalizer and deletiontimestamp set\n\t// (because the update above was dry-run too). If we already\n\t// have that version available, let's just return it now,\n\t// otherwise, we can call dry-run delete that will get us the\n\t// latest version of the object.\n\tif dryrun.IsDryRun(options.DryRun) && out != nil {\n\t\treturn out, true, nil\n\t}\n\n\t// delete immediately, or no graceful deletion supported\n\tklog.V(6).Infof(\"going to delete %s from registry: \", name)\n\tout = e.NewFunc()\n\tif err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Please refer to the place where we set ignoreNotFound for the reason\n\t\t// why we ignore the NotFound error .\n\t\tif storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {\n\t\t\t// The lastExisting object may not be the last state of the object\n\t\t\t// before its deletion, but it's the best approximation.\n\t\t\tout, err := e.finalizeDelete(ctx, lastExisting, true, options)\n\t\t\treturn out, true, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\tout, err = e.finalizeDelete(ctx, out, true, options)\n\treturn out, true, err\n}\n\n// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface\nfunc (e *Store) DeleteReturnsDeletedObject() bool {\n\treturn e.ReturnDeletedObject\n}\n\n// DeleteCollection removes all items returned by List with a 
given ListOptions from storage.\n//\n// DeleteCollection is currently NOT atomic. It can happen that only subset of objects\n// will be deleted from storage, and then an error will be returned.\n// In case of success, the list of deleted objects will be returned.\n//\n// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we\n// are removing all objects of a given type) with the current API (it's technically\n// possibly with storage API, but watch is not delivered correctly then).\n// It will be possible to fix it with v3 etcd API.\nfunc (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif listOptions == nil {\n\t\tlistOptions = &metainternalversion.ListOptions{}\n\t} else {\n\t\tlistOptions = listOptions.DeepCopy()\n\t}\n\n\tlistObj, err := e.List(ctx, listOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titems, err := meta.ExtractList(listObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(items) == 0 {\n\t\t// Nothing to delete, return now\n\t\treturn listObj, nil\n\t}\n\t// Spawn a number of goroutines, so that we can issue requests to storage\n\t// in parallel to speed up deletion.\n\t// It is proportional to the number of items to delete, up to\n\t// DeleteCollectionWorkers (it doesn't make much sense to spawn 16\n\t// workers to delete 10 items).\n\tworkersNumber := e.DeleteCollectionWorkers\n\tif workersNumber > len(items) {\n\t\tworkersNumber = len(items)\n\t}\n\tif workersNumber < 1 {\n\t\tworkersNumber = 1\n\t}\n\twg := sync.WaitGroup{}\n\ttoProcess := make(chan int, 2*workersNumber)\n\terrs := make(chan error, workersNumber+1)\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\terrs <- fmt.Errorf(\"DeleteCollection distributor panicked: %v\", panicReason)\n\t\t})\n\t\tfor i := 0; i < len(items); i++ {\n\t\t\ttoProcess <- 
i\n\t\t}\n\t\tclose(toProcess)\n\t}()\n\n\twg.Add(workersNumber)\n\tfor i := 0; i < workersNumber; i++ {\n\t\tgo func() {\n\t\t\t// panics don't cross goroutine boundaries\n\t\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\t\terrs <- fmt.Errorf(\"DeleteCollection goroutine panicked: %v\", panicReason)\n\t\t\t})\n\t\t\tdefer wg.Done()\n\n\t\t\tfor index := range toProcess {\n\t\t\t\taccessor, err := meta.Accessor(items[index])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) {\n\t\t\t\t\tklog.V(4).Infof(\"Delete %s in DeleteCollection failed: %v\", accessor.GetName(), err)\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tselect {\n\tcase err := <-errs:\n\t\treturn nil, err\n\tdefault:\n\t\treturn listObj, nil\n\t}\n}\n\n// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and\n// returns the decorated deleted object if appropriate.\nfunc (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool, options *metav1.DeleteOptions) (runtime.Object, error) {\n\tif runHooks && e.AfterDelete != nil {\n\t\te.AfterDelete(obj, options)\n\t}\n\tif e.ReturnDeletedObject {\n\t\tif e.Decorator != nil {\n\t\t\te.Decorator(obj)\n\t\t}\n\t\treturn obj, nil\n\t}\n\t// Return information about the deleted object, which enables clients to\n\t// verify that the object was actually deleted and not waiting for finalizers.\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tdetails := &metav1.StatusDetails{\n\t\tName: accessor.GetName(),\n\t\tGroup: qualifiedResource.Group,\n\t\tKind: qualifiedResource.Resource, // Yes we set Kind field to resource.\n\t\tUID: accessor.GetUID(),\n\t}\n\tstatus := &metav1.Status{Status: metav1.StatusSuccess, 
Details: details}\n\treturn status, nil\n}\n\n// Watch makes a matcher for the given label and field, and calls\n// WatchPredicate. If possible, you should customize PredicateFunc to produce\n// a matcher that matches by key. SelectionPredicate does this for you\n// automatically.\nfunc (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tpredicate := e.PredicateFunc(label, field)\n\n\tresourceVersion := \"\"\n\tif options != nil {\n\t\tresourceVersion = options.ResourceVersion\n\t\tpredicate.AllowWatchBookmarks = options.AllowWatchBookmarks\n\t}\n\treturn e.WatchPredicate(ctx, predicate, resourceVersion)\n}\n\n// WatchPredicate starts a watch for the items that matches.\nfunc (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {\n\tstorageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\tw, err := e.Storage.Watch(ctx, key, storageOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif e.Decorator != nil {\n\t\t\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t\t\t}\n\t\t\treturn w, nil\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the\n\t\t// optimization is skipped\n\t}\n\n\tw, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t}\n\treturn w, nil\n}\n\n// calculateTTL is a helper for retrieving the updated TTL for an object or\n// returning an error if the TTL 
cannot be calculated. The defaultTTL is\n// changed to 1 if less than zero. Zero means no TTL, not expire immediately.\nfunc (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {\n\t// TODO: validate this is assertion is still valid.\n\n\t// etcd may return a negative TTL for a node if the expiration has not\n\t// occurred due to server lag - we will ensure that the value is at least\n\t// set.\n\tif defaultTTL < 0 {\n\t\tdefaultTTL = 1\n\t}\n\tttl = uint64(defaultTTL)\n\tif e.TTLFunc != nil {\n\t\tttl, err = e.TTLFunc(obj, ttl, update)\n\t}\n\treturn ttl, err\n}\n\n// CompleteWithOptions updates the store with the provided options and\n// defaults common fields.\nfunc (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {\n\tif e.DefaultQualifiedResource.Empty() {\n\t\treturn fmt.Errorf(\"store %#v must have a non-empty qualified resource\", e)\n\t}\n\tif e.NewFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif e.NewListFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewListFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif (e.KeyRootFunc == nil) != (e.KeyFunc == nil) {\n\t\treturn fmt.Errorf(\"store for %s must set both KeyRootFunc and KeyFunc or neither\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif e.TableConvertor == nil {\n\t\treturn fmt.Errorf(\"store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time\", e.DefaultQualifiedResource.String())\n\t}\n\n\tvar isNamespaced bool\n\tswitch {\n\tcase e.CreateStrategy != nil:\n\t\tisNamespaced = e.CreateStrategy.NamespaceScoped()\n\tcase e.UpdateStrategy != nil:\n\t\tisNamespaced = e.UpdateStrategy.NamespaceScoped()\n\tdefault:\n\t\treturn fmt.Errorf(\"store for %s must have CreateStrategy or UpdateStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif 
e.DeleteStrategy == nil {\n\t\treturn fmt.Errorf(\"store for %s must have DeleteStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif options.RESTOptions == nil {\n\t\treturn fmt.Errorf(\"options for %s must have RESTOptions set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tattrFunc := options.AttrFunc\n\tif attrFunc == nil {\n\t\tif isNamespaced {\n\t\t\tattrFunc = storage.DefaultNamespaceScopedAttr\n\t\t} else {\n\t\t\tattrFunc = storage.DefaultClusterScopedAttr\n\t\t}\n\t}\n\tif e.PredicateFunc == nil {\n\t\te.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {\n\t\t\treturn storage.SelectionPredicate{\n\t\t\t\tLabel: label,\n\t\t\t\tField: field,\n\t\t\t\tGetAttrs: attrFunc,\n\t\t\t}\n\t\t}\n\t}\n\n\terr := validateIndexers(options.Indexers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// ResourcePrefix must come from the underlying factory\n\tprefix := opts.ResourcePrefix\n\tif !strings.HasPrefix(prefix, \"/\") {\n\t\tprefix = \"/\" + prefix\n\t}\n\tif prefix == \"/\" {\n\t\treturn fmt.Errorf(\"store for %s has an invalid prefix %q\", e.DefaultQualifiedResource.String(), opts.ResourcePrefix)\n\t}\n\n\t// Set the default behavior for storage key generation\n\tif e.KeyRootFunc == nil && e.KeyFunc == nil {\n\t\tif isNamespaced {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn NamespaceKeyRootFunc(ctx, prefix)\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t} else {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn prefix\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// We adapt the store's keyFunc so that we can use it 
with the StorageDecorator\n\t// without making any assumptions about where objects are stored in etcd\n\tkeyFunc := func(obj runtime.Object) (string, error) {\n\t\taccessor, err := meta.Accessor(obj)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif isNamespaced {\n\t\t\treturn e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName())\n\t\t}\n\n\t\treturn e.KeyFunc(genericapirequest.NewContext(), accessor.GetName())\n\t}\n\n\tif e.DeleteCollectionWorkers == 0 {\n\t\te.DeleteCollectionWorkers = opts.DeleteCollectionWorkers\n\t}\n\n\te.EnableGarbageCollection = opts.EnableGarbageCollection\n\n\tif e.ObjectNameFunc == nil {\n\t\te.ObjectNameFunc = func(obj runtime.Object) (string, error) {\n\t\t\taccessor, err := meta.Accessor(obj)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn accessor.GetName(), nil\n\t\t}\n\t}\n\n\tif e.Storage.Storage == nil {\n\t\te.Storage.Codec = opts.StorageConfig.Codec\n\t\tvar err error\n\t\te.Storage.Storage, e.DestroyFunc, err = opts.Decorator(\n\t\t\topts.StorageConfig,\n\t\t\tprefix,\n\t\t\tkeyFunc,\n\t\t\te.NewFunc,\n\t\t\te.NewListFunc,\n\t\t\tattrFunc,\n\t\t\toptions.TriggerFunc,\n\t\t\toptions.Indexers,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.StorageVersioner = opts.StorageConfig.EncodeVersioner\n\n\t\tif opts.CountMetricPollPeriod > 0 {\n\t\t\tstopFunc := e.startObservingCount(opts.CountMetricPollPeriod)\n\t\t\tpreviousDestroy := e.DestroyFunc\n\t\t\te.DestroyFunc = func() {\n\t\t\t\tstopFunc()\n\t\t\t\tif previousDestroy != nil {\n\t\t\t\t\tpreviousDestroy()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// startObservingCount starts monitoring given prefix and periodically updating metrics. 
It returns a function to stop collection.\nfunc (e *Store) startObservingCount(period time.Duration) func() {\n\tprefix := e.KeyRootFunc(genericapirequest.NewContext())\n\tresourceName := e.DefaultQualifiedResource.String()\n\tklog.V(2).Infof(\"Monitoring %v count at <storage-prefix>/%v\", resourceName, prefix)\n\tstopCh := make(chan struct{})\n\tgo wait.JitterUntil(func() {\n\t\tcount, err := e.Storage.Count(prefix)\n\t\tif err != nil {\n\t\t\tklog.V(5).Infof(\"Failed to update storage count metric: %v\", err)\n\t\t\tmetrics.UpdateObjectCount(resourceName, -1)\n\t\t} else {\n\t\t\tmetrics.UpdateObjectCount(resourceName, count)\n\t\t}\n\t}, period, resourceCountPollPeriodJitter, true, stopCh)\n\treturn func() { close(stopCh) }\n}\n\nfunc (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {\n\tif e.TableConvertor != nil {\n\t\treturn e.TableConvertor.ConvertToTable(ctx, object, tableOptions)\n\t}\n\treturn rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions)\n}\n\nfunc (e *Store) StorageVersion() runtime.GroupVersioner {\n\treturn e.StorageVersioner\n}\n\n// GetResetFields implements rest.ResetFieldsStrategy\nfunc (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {\n\tif e.ResetFieldsStrategy == nil {\n\t\treturn nil\n\t}\n\treturn e.ResetFieldsStrategy.GetResetFields()\n}\n\n// validateIndexers will check the prefix of indexers.\nfunc validateIndexers(indexers *cache.Indexers) error {\n\tif indexers == nil {\n\t\treturn nil\n\t}\n\tfor indexName := range *indexers {\n\t\tif len(indexName) <= 2 || (indexName[:2] != \"l:\" && indexName[:2] != \"f:\") {\n\t\t\treturn fmt.Errorf(\"index must prefix with \\\"l:\\\" or \\\"f:\\\"\")\n\t\t}\n\t}\n\treturn nil\n}\n"
},
{
"file": "pkg/registry/generic/registry/store.go",
"description": "The method first calls `BeforeCreate` which prepares the object for creation by removing the `.status` part of the object, etc.",
"line": 376,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\t\"k8s.io/apimachinery/pkg/api/validation/path\"\n\tmetainternalversion \"k8s.io/apimachinery/pkg/apis/meta/internalversion\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/sets\"\n\t\"k8s.io/apimachinery/pkg/util/validation/field\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\tgenericapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/registry/generic\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\tstoreerr \"k8s.io/apiserver/pkg/storage/errors\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"sigs.k8s.io/structured-merge-diff/v4/fieldpath\"\n\n\t\"k8s.io/klog/v2\"\n)\n\n// FinishFunc is a function returned by Begin hooks to complete an operation.\ntype FinishFunc func(ctx context.Context, success bool)\n\n// AfterDeleteFunc is the type used for the 
Store.AfterDelete hook.\ntype AfterDeleteFunc func(obj runtime.Object, options *metav1.DeleteOptions)\n\n// BeginCreateFunc is the type used for the Store.BeginCreate hook.\ntype BeginCreateFunc func(ctx context.Context, obj runtime.Object, options *metav1.CreateOptions) (FinishFunc, error)\n\n// AfterCreateFunc is the type used for the Store.AfterCreate hook.\ntype AfterCreateFunc func(obj runtime.Object, options *metav1.CreateOptions)\n\n// BeginUpdateFunc is the type used for the Store.BeginUpdate hook.\ntype BeginUpdateFunc func(ctx context.Context, obj, old runtime.Object, options *metav1.UpdateOptions) (FinishFunc, error)\n\n// AfterUpdateFunc is the type used for the Store.AfterUpdate hook.\ntype AfterUpdateFunc func(obj runtime.Object, options *metav1.UpdateOptions)\n\n// GenericStore interface can be used for type assertions when we need to access the underlying strategies.\ntype GenericStore interface {\n\tGetCreateStrategy() rest.RESTCreateStrategy\n\tGetUpdateStrategy() rest.RESTUpdateStrategy\n\tGetDeleteStrategy() rest.RESTDeleteStrategy\n}\n\n// Store implements k8s.io/apiserver/pkg/registry/rest.StandardStorage. It's\n// intended to be embeddable and allows the consumer to implement any\n// non-generic functions that are required. This object is intended to be\n// copyable so that it can be used in different ways but share the same\n// underlying behavior.\n//\n// All fields are required unless specified.\n//\n// The intended use of this type is embedding within a Kind specific\n// RESTStorage implementation. This type provides CRUD semantics on a Kubelike\n// resource, handling details like conflict detection with ResourceVersion and\n// semantics. 
The RESTCreateStrategy, RESTUpdateStrategy, and\n// RESTDeleteStrategy are generic across all backends, and encapsulate logic\n// specific to the API.\n//\n// TODO: make the default exposed methods exactly match a generic RESTStorage\ntype Store struct {\n\t// NewFunc returns a new instance of the type this registry returns for a\n\t// GET of a single object, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object\n\tNewFunc func() runtime.Object\n\n\t// NewListFunc returns a new list of the type this registry; it is the\n\t// type returned when the resource is listed, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource\n\tNewListFunc func() runtime.Object\n\n\t// DefaultQualifiedResource is the pluralized name of the resource.\n\t// This field is used if there is no request info present in the context.\n\t// See qualifiedResourceFromContext for details.\n\tDefaultQualifiedResource schema.GroupResource\n\n\t// KeyRootFunc returns the root etcd key for this resource; should not\n\t// include trailing \"/\". This is used for operations that work on the\n\t// entire collection (listing and watching).\n\t//\n\t// KeyRootFunc and KeyFunc must be supplied together or not at all.\n\tKeyRootFunc func(ctx context.Context) string\n\n\t// KeyFunc returns the key for a specific object in the collection.\n\t// KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace'\n\t// can be gotten from ctx.\n\t//\n\t// KeyFunc and KeyRootFunc must be supplied together or not at all.\n\tKeyFunc func(ctx context.Context, name string) (string, error)\n\n\t// ObjectNameFunc returns the name of an object or an error.\n\tObjectNameFunc func(obj runtime.Object) (string, error)\n\n\t// TTLFunc returns the TTL (time to live) that objects should be persisted\n\t// with. The existing parameter is the current TTL or the default for this\n\t// operation. 
The update parameter indicates whether this is an operation\n\t// against an existing object.\n\t//\n\t// Objects that are persisted with a TTL are evicted once the TTL expires.\n\tTTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)\n\n\t// PredicateFunc returns a matcher corresponding to the provided labels\n\t// and fields. The SelectionPredicate returned should return true if the\n\t// object matches the given field and label selectors.\n\tPredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate\n\n\t// EnableGarbageCollection affects the handling of Update and Delete\n\t// requests. Enabling garbage collection allows finalizers to do work to\n\t// finalize this object before the store deletes it.\n\t//\n\t// If any store has garbage collection enabled, it must also be enabled in\n\t// the kube-controller-manager.\n\tEnableGarbageCollection bool\n\n\t// DeleteCollectionWorkers is the maximum number of workers in a single\n\t// DeleteCollection call. Delete requests for the items in a collection\n\t// are issued in parallel.\n\tDeleteCollectionWorkers int\n\n\t// Decorator is an optional exit hook on an object returned from the\n\t// underlying storage. The returned object could be an individual object\n\t// (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for\n\t// integrations that are above storage and should only be used for\n\t// specific cases where storage of the value is not appropriate, since\n\t// they cannot be watched.\n\tDecorator func(runtime.Object)\n\n\t// CreateStrategy implements resource-specific behavior during creation.\n\tCreateStrategy rest.RESTCreateStrategy\n\t// BeginCreate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterCreate and Decorator, indicating via the argument\n\t// whether the operation succeeded. 
If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginCreate BeginCreateFunc\n\t// AfterCreate implements a further operation to run after a resource is\n\t// created and before it is decorated, optional.\n\tAfterCreate AfterCreateFunc\n\n\t// UpdateStrategy implements resource-specific behavior during updates.\n\tUpdateStrategy rest.RESTUpdateStrategy\n\t// BeginUpdate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterUpdate and Decorator, indicating via the argument\n\t// whether the operation succeeded. If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginUpdate BeginUpdateFunc\n\t// AfterUpdate implements a further operation to run after a resource is\n\t// updated and before it is decorated, optional.\n\tAfterUpdate AfterUpdateFunc\n\n\t// DeleteStrategy implements resource-specific behavior during deletion.\n\tDeleteStrategy rest.RESTDeleteStrategy\n\t// AfterDelete implements a further operation to run after a resource is\n\t// deleted and before it is decorated, optional.\n\tAfterDelete AfterDeleteFunc\n\t// ReturnDeletedObject determines whether the Store returns the object\n\t// that was deleted. Otherwise, return a generic success status response.\n\tReturnDeletedObject bool\n\t// ShouldDeleteDuringUpdate is an optional function to determine whether\n\t// an update from existing to obj should result in a delete.\n\t// If specified, this is checked in addition to standard finalizer,\n\t// deletionTimestamp, and deletionGracePeriodSeconds checks.\n\tShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool\n\n\t// TableConvertor is an optional interface for transforming items or lists\n\t// of items into tabular output. 
If unset, the default will be used.\n\tTableConvertor rest.TableConvertor\n\n\t// ResetFieldsStrategy provides the fields reset by the strategy that\n\t// should not be modified by the user.\n\tResetFieldsStrategy rest.ResetFieldsStrategy\n\n\t// Storage is the interface for the underlying storage for the\n\t// resource. It is wrapped into a \"DryRunnableStorage\" that will\n\t// either pass-through or simply dry-run.\n\tStorage DryRunnableStorage\n\t// StorageVersioner outputs the <group/version/kind> an object will be\n\t// converted to before persisted in etcd, given a list of possible\n\t// kinds of the object.\n\t// If the StorageVersioner is nil, apiserver will leave the\n\t// storageVersionHash as empty in the discovery document.\n\tStorageVersioner runtime.GroupVersioner\n\t// Called to cleanup clients used by the underlying Storage; optional.\n\tDestroyFunc func()\n}\n\n// Note: the rest.StandardStorage interface aggregates the common REST verbs\nvar _ rest.StandardStorage = &Store{}\nvar _ rest.TableConvertor = &Store{}\nvar _ GenericStore = &Store{}\n\nconst (\n\tOptimisticLockErrorMsg = \"the object has been modified; please apply your changes to the latest version and try again\"\n\tresourceCountPollPeriodJitter = 1.2\n)\n\n// NamespaceKeyRootFunc is the default function for constructing storage paths\n// to resource directories enforcing namespace rules.\nfunc NamespaceKeyRootFunc(ctx context.Context, prefix string) string {\n\tkey := prefix\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif ok && len(ns) > 0 {\n\t\tkey = key + \"/\" + ns\n\t}\n\treturn key\n}\n\n// NamespaceKeyFunc is the default function for constructing storage paths to\n// a resource relative to the given prefix enforcing namespace rules. 
If the\n// context does not contain a namespace, it errors.\nfunc NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tkey := NamespaceKeyRootFunc(ctx, prefix)\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif !ok || len(ns) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Namespace parameter required.\")\n\t}\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey = key + \"/\" + name\n\treturn key, nil\n}\n\n// NoNamespaceKeyFunc is the default function for constructing storage paths\n// to a resource relative to the given prefix without a namespace.\nfunc NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey := prefix + \"/\" + name\n\treturn key, nil\n}\n\n// New implements RESTStorage.New.\nfunc (e *Store) New() runtime.Object {\n\treturn e.NewFunc()\n}\n\n// NewList implements rest.Lister.\nfunc (e *Store) NewList() runtime.Object {\n\treturn e.NewListFunc()\n}\n\n// NamespaceScoped indicates whether the resource is namespaced\nfunc (e *Store) NamespaceScoped() bool {\n\tif e.CreateStrategy != nil {\n\t\treturn e.CreateStrategy.NamespaceScoped()\n\t}\n\tif e.UpdateStrategy != nil {\n\t\treturn e.UpdateStrategy.NamespaceScoped()\n\t}\n\n\tpanic(\"programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too\")\n}\n\n// GetCreateStrategy implements GenericStore.\nfunc (e *Store) GetCreateStrategy() rest.RESTCreateStrategy 
{\n\treturn e.CreateStrategy\n}\n\n// GetUpdateStrategy implements GenericStore.\nfunc (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy {\n\treturn e.UpdateStrategy\n}\n\n// GetDeleteStrategy implements GenericStore.\nfunc (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy {\n\treturn e.DeleteStrategy\n}\n\n// List returns a list of items matching labels and field according to the\n// store's PredicateFunc.\nfunc (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tout, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ListPredicate returns a list of all the items matching the given\n// SelectionPredicate.\nfunc (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif options == nil {\n\t\t// By default we should serve the request from etcd.\n\t\toptions = &metainternalversion.ListOptions{ResourceVersion: \"\"}\n\t}\n\tp.Limit = options.Limit\n\tp.Continue = options.Continue\n\tlist := e.NewListFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstorageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\terr := e.Storage.GetToList(ctx, key, storageOpts, list)\n\t\t\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the optimization 
is skipped\n\t}\n\n\terr := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list)\n\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n}\n\n// finishNothing is a do-nothing FinishFunc.\nfunc finishNothing(context.Context, bool) {}\n\n// Create inserts a new item according to the unique key from the object.\nfunc (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\tvar finishCreate FinishFunc = finishNothing\n\n\tif e.BeginCreate != nil {\n\t\tfn, err := e.BeginCreate(ctx, obj, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfinishCreate = fn\n\t\tdefer func() {\n\t\t\tfinishCreate(ctx, false)\n\t\t}()\n\t}\n\n\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\treturn nil, err\n\t}\n\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t// handling chain wants to enforce.\n\tif createValidation != nil {\n\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tname, err := e.ObjectNameFunc(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tttl, err := e.calculateTTL(obj, 0, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := e.NewFunc()\n\tif err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {\n\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)\n\t\tif !apierrors.IsAlreadyExists(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tif errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taccessor, errGetAcc := meta.Accessor(out)\n\t\tif errGetAcc != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\tif accessor.GetDeletionTimestamp() != nil {\n\t\t\tmsg := &err.(*apierrors.StatusError).ErrStatus.Message\n\t\t\t*msg = fmt.Sprintf(\"object is being deleted: %s\", *msg)\n\t\t}\n\t\treturn nil, err\n\t}\n\t// The operation has succeeded. Call the finish function if there is one,\n\t// and then make sure the defer doesn't call it again.\n\tfn := finishCreate\n\tfinishCreate = finishNothing\n\tfn(ctx, true)\n\n\tif e.AfterCreate != nil {\n\t\te.AfterCreate(out, options)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ShouldDeleteDuringUpdate is the default function for\n// checking if an object should be deleted during an update.\n// It checks if the new object has no finalizers,\n// the existing object's deletionTimestamp is set, and\n// the existing object's deletionGracePeriodSeconds is 0 or nil\nfunc ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool {\n\tnewMeta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\toldMeta, err := meta.Accessor(existing)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\tif len(newMeta.GetFinalizers()) > 0 {\n\t\t// don't delete with finalizers remaining in the new object\n\t\treturn false\n\t}\n\tif oldMeta.GetDeletionTimestamp() == nil {\n\t\t// don't delete if the existing object hasn't had a delete request made\n\t\treturn false\n\t}\n\t// delete if the existing object has no grace period or a grace period of 0\n\treturn oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0\n}\n\n// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list.\n// Used for objects that are either been finalized or have never initialized.\nfunc (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, options *metav1.DeleteOptions) 
(runtime.Object, bool, error) {\n\tout := e.NewFunc()\n\tklog.V(6).Infof(\"going to delete %s from registry, triggered by update\", name)\n\t// Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb.\n\tif err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Deletion is racy, i.e., there could be multiple update\n\t\t// requests to remove all finalizers from the object, so we\n\t\t// ignore the NotFound error.\n\t\tif storage.IsNotFound(err) {\n\t\t\t_, err := e.finalizeDelete(ctx, obj, true, options)\n\t\t\t// clients are expecting an updated object if a PUT succeeded,\n\t\t\t// but finalizeDelete returns a metav1.Status, so return\n\t\t\t// the object in the request instead.\n\t\t\treturn obj, false, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\t_, err := e.finalizeDelete(ctx, out, true, options)\n\t// clients are expecting an updated object if a PUT succeeded, but\n\t// finalizeDelete returns a metav1.Status, so return the object in\n\t// the request instead.\n\treturn obj, false, err\n}\n\n// Update performs an atomic update and set of the object. Returns the result of the update\n// or an error. 
If the registry allows create-on-update, the create flow will be executed.\n// A bool is returned along with the object and any errors, to indicate object creation.\nfunc (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar (\n\t\tcreatingObj runtime.Object\n\t\tcreating = false\n\t)\n\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstoragePreconditions := &storage.Preconditions{}\n\tif preconditions := objInfo.Preconditions(); preconditions != nil {\n\t\tstoragePreconditions.UID = preconditions.UID\n\t\tstoragePreconditions.ResourceVersion = preconditions.ResourceVersion\n\t}\n\n\tout := e.NewFunc()\n\t// deleteObj is only used in case a deletion is carried out\n\tvar deleteObj runtime.Object\n\terr = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {\n\t\texistingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif existingResourceVersion == 0 {\n\t\t\tif !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate {\n\t\t\t\treturn nil, nil, apierrors.NewNotFound(qualifiedResource, name)\n\t\t\t}\n\t\t}\n\n\t\t// Given the existing object, get the new object\n\t\tobj, err := objInfo.UpdatedObject(ctx, existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// If AllowUnconditionalUpdate() is true and the object specified by\n\t\t// the user does not have a resource version, then we populate it with\n\t\t// the latest version. 
Else, we check that the version specified by\n\t\t// the user matches the version of latest storage object.\n\t\tnewResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tdoUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()\n\n\t\tif existingResourceVersion == 0 {\n\t\t\tvar finishCreate FinishFunc = finishNothing\n\n\t\t\tif e.BeginCreate != nil {\n\t\t\t\tfn, err := e.BeginCreate(ctx, obj, newCreateOptionsFromUpdateOptions(options))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t\tfinishCreate = fn\n\t\t\t\tdefer func() {\n\t\t\t\t\tfinishCreate(ctx, false)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tcreating = true\n\t\t\tcreatingObj = obj\n\t\t\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t\t\t// handling chain wants to enforce.\n\t\t\tif createValidation != nil {\n\t\t\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tttl, err := e.calculateTTL(obj, 0, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\t// The operation has succeeded. 
Call the finish function if there is one,\n\t\t\t// and then make sure the defer doesn't call it again.\n\t\t\tfn := finishCreate\n\t\t\tfinishCreate = finishNothing\n\t\t\tfn(ctx, true)\n\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\n\t\tcreating = false\n\t\tcreatingObj = nil\n\t\tif doUnconditionalUpdate {\n\t\t\t// Update the object's resource version to match the latest\n\t\t\t// storage object's resource version.\n\t\t\terr = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// Check if the object's resource version matches the latest\n\t\t\t// resource version.\n\t\t\tif newResourceVersion == 0 {\n\t\t\t\t// TODO: The Invalid error should have a field for Resource.\n\t\t\t\t// After that field is added, we should fill the Resource and\n\t\t\t\t// leave the Kind field empty. See the discussion in #18526.\n\t\t\t\tqualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}\n\t\t\t\tfieldErrList := field.ErrorList{field.Invalid(field.NewPath(\"metadata\").Child(\"resourceVersion\"), newResourceVersion, \"must be specified for an update\")}\n\t\t\t\treturn nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList)\n\t\t\t}\n\t\t\tif newResourceVersion != existingResourceVersion {\n\t\t\t\treturn nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))\n\t\t\t}\n\t\t}\n\n\t\tvar finishUpdate FinishFunc = finishNothing\n\n\t\tif e.BeginUpdate != nil {\n\t\t\tfn, err := e.BeginUpdate(ctx, obj, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tfinishUpdate = fn\n\t\t\tdefer func() {\n\t\t\t\tfinishUpdate(ctx, false)\n\t\t\t}()\n\t\t}\n\n\t\tif err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t// at this point we have a fully formed object. 
It is time to call the validators that the apiserver\n\t\t// handling chain wants to enforce.\n\t\tif updateValidation != nil {\n\t\t\tif err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\t// Check the default delete-during-update conditions, and store-specific conditions if provided\n\t\tif ShouldDeleteDuringUpdate(ctx, key, obj, existing) &&\n\t\t\t(e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) {\n\t\t\tdeleteObj = obj\n\t\t\treturn nil, nil, errEmptiedFinalizers\n\t\t}\n\t\tttl, err := e.calculateTTL(obj, res.TTL, true)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// The operation has succeeded. Call the finish function if there is one,\n\t\t// and then make sure the defer doesn't call it again.\n\t\tfn := finishUpdate\n\t\tfinishUpdate = finishNothing\n\t\tfn(ctx, true)\n\n\t\tif int64(ttl) != res.TTL {\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\t\treturn obj, nil, nil\n\t}, dryrun.IsDryRun(options.DryRun), nil)\n\n\tif err != nil {\n\t\t// delete the object\n\t\tif err == errEmptiedFinalizers {\n\t\t\treturn e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, newDeleteOptionsFromUpdateOptions(options))\n\t\t}\n\t\tif creating {\n\t\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)\n\t\t} else {\n\t\t\terr = storeerr.InterpretUpdateError(err, qualifiedResource, name)\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\tif creating {\n\t\tif e.AfterCreate != nil {\n\t\t\te.AfterCreate(out, newCreateOptionsFromUpdateOptions(options))\n\t\t}\n\t} else {\n\t\tif e.AfterUpdate != nil {\n\t\t\te.AfterUpdate(out, options)\n\t\t}\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, creating, nil\n}\n\n// This is a helper to convert UpdateOptions to CreateOptions for the\n// create-on-update 
path.\nfunc newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions {\n\tco := &metav1.CreateOptions{\n\t\tDryRun: in.DryRun,\n\t\tFieldManager: in.FieldManager,\n\t}\n\tco.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\treturn co\n}\n\n// This is a helper to convert UpdateOptions to DeleteOptions for the\n// delete-on-update path.\nfunc newDeleteOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.DeleteOptions {\n\tdo := &metav1.DeleteOptions{\n\t\tDryRun: in.DryRun,\n\t}\n\tdo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"DeleteOptions\"))\n\treturn do\n}\n\n// Get retrieves the item from storage.\nfunc (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {\n\tobj := e.NewFunc()\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil {\n\t\treturn nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(obj)\n\t}\n\treturn obj, nil\n}\n\n// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info.\n// If the context has no request info, DefaultQualifiedResource is used.\nfunc (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource {\n\tif info, ok := genericapirequest.RequestInfoFrom(ctx); ok {\n\t\treturn schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}\n\t}\n\t// some implementations access storage directly and thus the context has no RequestInfo\n\treturn e.DefaultQualifiedResource\n}\n\nvar (\n\terrAlreadyDeleting = fmt.Errorf(\"abort delete\")\n\terrDeleteNow = fmt.Errorf(\"delete now\")\n\terrEmptiedFinalizers = fmt.Errorf(\"emptied finalizers\")\n)\n\n// shouldOrphanDependents returns true if the finalizer for orphaning 
should be set\n// updated for FinalizerOrphanDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerOrphanDependents: options, existing finalizers of the object,\n// and e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tgcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)\n\tvar defaultGCPolicy rest.GarbageCollectionPolicy\n\tif ok {\n\t\tdefaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx)\n\t}\n\n\tif defaultGCPolicy == rest.Unsupported {\n\t\t// return false to indicate that we should NOT orphan\n\t\treturn false\n\t}\n\n\t// An explicit policy was set at deletion time, that overrides everything\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\t//lint:ignore SA1019 backwards compatibility\n\t\treturn *options.OrphanDependents\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationOrphan:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation should make sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Get default orphan policy from this REST object type if it exists\n\treturn defaultGCPolicy == rest.OrphanDependents\n}\n\n// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set\n// updated for 
FinalizerDeleteDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerDeleteDependents: options, existing finalizers of the object, and\n// e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tif gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported {\n\t\t// return false to indicate that we should NOT delete in foreground\n\t\treturn false\n\t}\n\n\t// If an explicit policy was set at deletion time, that overrides both\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\treturn false\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationForeground:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation has made sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn false\n}\n\n// deletionFinalizersForGarbageCollection analyzes the object and delete options\n// to determine whether the object is in need of finalization by the garbage\n// collector. 
If so, returns the set of deletion finalizers to apply and a bool\n// indicating whether the finalizer list has changed and is in need of updating.\n//\n// The finalizers returned are intended to be handled by the garbage collector.\n// If garbage collection is disabled for the store, this function returns false\n// to ensure finalizers aren't set which will never be cleared.\nfunc deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {\n\tif !e.EnableGarbageCollection {\n\t\treturn false, []string{}\n\t}\n\tshouldOrphan := shouldOrphanDependents(ctx, e, accessor, options)\n\tshouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options)\n\tnewFinalizers := []string{}\n\n\t// first remove both finalizers, add them back if needed.\n\tfor _, f := range accessor.GetFinalizers() {\n\t\tif f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents {\n\t\t\tcontinue\n\t\t}\n\t\tnewFinalizers = append(newFinalizers, f)\n\t}\n\n\tif shouldOrphan {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents)\n\t}\n\tif shouldDeleteDependentInForeground {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents)\n\t}\n\n\toldFinalizerSet := sets.NewString(accessor.GetFinalizers()...)\n\tnewFinalizersSet := sets.NewString(newFinalizers...)\n\tif oldFinalizerSet.Equal(newFinalizersSet) {\n\t\treturn false, accessor.GetFinalizers()\n\t}\n\treturn true, newFinalizers\n}\n\n// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the\n// DeletionTimestamp to \"now\" if there is no existing deletionTimestamp or if the existing\n// deletionTimestamp is further in future. 
Finalizers are watching for such updates and will\n// finalize the object if their IDs are present in the object's Finalizers list.\nfunc markAsDeleting(obj runtime.Object, now time.Time) (err error) {\n\tobjectMeta, kerr := meta.Accessor(obj)\n\tif kerr != nil {\n\t\treturn kerr\n\t}\n\t// This handles Generation bump for resources that don't support graceful\n\t// deletion. For resources that support graceful deletion is handle in\n\t// pkg/api/rest/delete.go\n\tif objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {\n\t\tobjectMeta.SetGeneration(objectMeta.GetGeneration() + 1)\n\t}\n\texistingDeletionTimestamp := objectMeta.GetDeletionTimestamp()\n\tif existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) {\n\t\tmetaNow := metav1.NewTime(now)\n\t\tobjectMeta.SetDeletionTimestamp(&metaNow)\n\t}\n\tvar zero int64 = 0\n\tobjectMeta.SetDeletionGracePeriodSeconds(&zero)\n\treturn nil\n}\n\n// updateForGracefulDeletionAndFinalizers updates the given object for\n// graceful deletion and finalization by setting the deletion timestamp and\n// grace period seconds (graceful deletion) and updating the list of\n// finalizers (finalization); it returns:\n//\n// 1. an error\n// 2. a boolean indicating that the object was not found, but it should be\n// ignored\n// 3. a boolean indicating that the object's grace period is exhausted and it\n// should be deleted immediately\n// 4. a new output object with the state that was updated\n// 5. 
a copy of the last existing state of the object\nfunc (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {\n\tlastGraceful := int64(0)\n\tvar pendingFinalizers bool\n\tout = e.NewFunc()\n\terr = e.Storage.GuaranteedUpdate(\n\t\tctx,\n\t\tkey,\n\t\tout,\n\t\tfalse, /* ignoreNotFound */\n\t\t&preconditions,\n\t\tstorage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {\n\t\t\tif err := deleteValidation(ctx, existing); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif pendingGraceful {\n\t\t\t\treturn nil, errAlreadyDeleting\n\t\t\t}\n\n\t\t\t// Add/remove the orphan finalizer as the options dictates.\n\t\t\t// Note that this occurs after checking pendingGraceufl, so\n\t\t\t// finalizers cannot be updated via DeleteOptions if deletion has\n\t\t\t// started.\n\t\t\texistingAccessor, err := meta.Accessor(existing)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tneedsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options)\n\t\t\tif needsUpdate {\n\t\t\t\texistingAccessor.SetFinalizers(newFinalizers)\n\t\t\t}\n\n\t\t\tpendingFinalizers = len(existingAccessor.GetFinalizers()) != 0\n\t\t\tif !graceful {\n\t\t\t\t// set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion\n\t\t\t\tif pendingFinalizers {\n\t\t\t\t\tklog.V(6).Infof(\"update the DeletionTimestamp to \\\"now\\\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers\", name)\n\t\t\t\t\terr = markAsDeleting(existing, time.Now())\n\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn existing, nil\n\t\t\t\t}\n\t\t\t\treturn nil, errDeleteNow\n\t\t\t}\n\t\t\tlastGraceful = *options.GracePeriodSeconds\n\t\t\tlastExisting = existing\n\t\t\treturn existing, nil\n\t\t}),\n\t\tdryrun.IsDryRun(options.DryRun),\n\t\tnil,\n\t)\n\tswitch err {\n\tcase nil:\n\t\t// If there are pending finalizers, we never delete the object immediately.\n\t\tif pendingFinalizers {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\tif lastGraceful > 0 {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\t// If we are here, the registry supports grace period mechanism and\n\t\t// we are intentionally delete gracelessly. In this case, we may\n\t\t// enter a race with other k8s components. If other component wins\n\t\t// the race, the object will not be found, and we should tolerate\n\t\t// the NotFound error. See\n\t\t// https://github.com/kubernetes/kubernetes/issues/19403 for\n\t\t// details.\n\t\treturn nil, true, true, out, lastExisting\n\tcase errDeleteNow:\n\t\t// we've updated the object to have a zero grace period, or it's already at 0, so\n\t\t// we should fall through and truly delete the object.\n\t\treturn nil, false, true, out, lastExisting\n\tcase errAlreadyDeleting:\n\t\tout, err = e.finalizeDelete(ctx, in, true, options)\n\t\treturn err, false, false, out, lastExisting\n\tdefault:\n\t\treturn storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting\n\t}\n}\n\n// Delete removes the item from storage.\nfunc (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tobj := e.NewFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tif err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil {\n\t\treturn nil, 
false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\n\t// support older consumers of delete by treating \"nil\" as delete immediately\n\tif options == nil {\n\t\toptions = metav1.NewDeleteOptions(0)\n\t}\n\tvar preconditions storage.Preconditions\n\tif options.Preconditions != nil {\n\t\tpreconditions.UID = options.Preconditions.UID\n\t\tpreconditions.ResourceVersion = options.Preconditions.ResourceVersion\n\t}\n\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\t// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending\n\tif pendingGraceful {\n\t\tout, err := e.finalizeDelete(ctx, obj, false, options)\n\t\treturn out, false, err\n\t}\n\t// check if obj has pending finalizers\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, false, apierrors.NewInternalError(err)\n\t}\n\tpendingFinalizers := len(accessor.GetFinalizers()) != 0\n\tvar ignoreNotFound bool\n\tvar deleteImmediately bool = true\n\tvar lastExisting, out runtime.Object\n\n\t// Handle combinations of graceful deletion and finalization by issuing\n\t// the correct updates.\n\tshouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options)\n\t// TODO: remove the check, because we support no-op updates now.\n\tif graceful || pendingFinalizers || shouldUpdateFinalizers {\n\t\terr, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj)\n\t\t// Update the preconditions.ResourceVersion if set since we updated the object.\n\t\tif err == nil && deleteImmediately && preconditions.ResourceVersion != nil {\n\t\t\taccessor, err = meta.Accessor(out)\n\t\t\tif err != nil {\n\t\t\t\treturn out, false, apierrors.NewInternalError(err)\n\t\t\t}\n\t\t\tresourceVersion := 
accessor.GetResourceVersion()\n\t\t\tpreconditions.ResourceVersion = &resourceVersion\n\t\t}\n\t}\n\n\t// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.\n\tif !deleteImmediately || err != nil {\n\t\treturn out, false, err\n\t}\n\n\t// Going further in this function is not useful when we are\n\t// performing a dry-run request. Worse, it will actually\n\t// override \"out\" with the version of the object in database\n\t// that doesn't have the finalizer and deletiontimestamp set\n\t// (because the update above was dry-run too). If we already\n\t// have that version available, let's just return it now,\n\t// otherwise, we can call dry-run delete that will get us the\n\t// latest version of the object.\n\tif dryrun.IsDryRun(options.DryRun) && out != nil {\n\t\treturn out, true, nil\n\t}\n\n\t// delete immediately, or no graceful deletion supported\n\tklog.V(6).Infof(\"going to delete %s from registry: \", name)\n\tout = e.NewFunc()\n\tif err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Please refer to the place where we set ignoreNotFound for the reason\n\t\t// why we ignore the NotFound error .\n\t\tif storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {\n\t\t\t// The lastExisting object may not be the last state of the object\n\t\t\t// before its deletion, but it's the best approximation.\n\t\t\tout, err := e.finalizeDelete(ctx, lastExisting, true, options)\n\t\t\treturn out, true, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\tout, err = e.finalizeDelete(ctx, out, true, options)\n\treturn out, true, err\n}\n\n// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface\nfunc (e *Store) DeleteReturnsDeletedObject() bool {\n\treturn e.ReturnDeletedObject\n}\n\n// DeleteCollection removes all items returned by List with a 
given ListOptions from storage.\n//\n// DeleteCollection is currently NOT atomic. It can happen that only subset of objects\n// will be deleted from storage, and then an error will be returned.\n// In case of success, the list of deleted objects will be returned.\n//\n// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we\n// are removing all objects of a given type) with the current API (it's technically\n// possibly with storage API, but watch is not delivered correctly then).\n// It will be possible to fix it with v3 etcd API.\nfunc (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif listOptions == nil {\n\t\tlistOptions = &metainternalversion.ListOptions{}\n\t} else {\n\t\tlistOptions = listOptions.DeepCopy()\n\t}\n\n\tlistObj, err := e.List(ctx, listOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titems, err := meta.ExtractList(listObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(items) == 0 {\n\t\t// Nothing to delete, return now\n\t\treturn listObj, nil\n\t}\n\t// Spawn a number of goroutines, so that we can issue requests to storage\n\t// in parallel to speed up deletion.\n\t// It is proportional to the number of items to delete, up to\n\t// DeleteCollectionWorkers (it doesn't make much sense to spawn 16\n\t// workers to delete 10 items).\n\tworkersNumber := e.DeleteCollectionWorkers\n\tif workersNumber > len(items) {\n\t\tworkersNumber = len(items)\n\t}\n\tif workersNumber < 1 {\n\t\tworkersNumber = 1\n\t}\n\twg := sync.WaitGroup{}\n\ttoProcess := make(chan int, 2*workersNumber)\n\terrs := make(chan error, workersNumber+1)\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\terrs <- fmt.Errorf(\"DeleteCollection distributor panicked: %v\", panicReason)\n\t\t})\n\t\tfor i := 0; i < len(items); i++ {\n\t\t\ttoProcess <- 
i\n\t\t}\n\t\tclose(toProcess)\n\t}()\n\n\twg.Add(workersNumber)\n\tfor i := 0; i < workersNumber; i++ {\n\t\tgo func() {\n\t\t\t// panics don't cross goroutine boundaries\n\t\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\t\terrs <- fmt.Errorf(\"DeleteCollection goroutine panicked: %v\", panicReason)\n\t\t\t})\n\t\t\tdefer wg.Done()\n\n\t\t\tfor index := range toProcess {\n\t\t\t\taccessor, err := meta.Accessor(items[index])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) {\n\t\t\t\t\tklog.V(4).Infof(\"Delete %s in DeleteCollection failed: %v\", accessor.GetName(), err)\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tselect {\n\tcase err := <-errs:\n\t\treturn nil, err\n\tdefault:\n\t\treturn listObj, nil\n\t}\n}\n\n// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and\n// returns the decorated deleted object if appropriate.\nfunc (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool, options *metav1.DeleteOptions) (runtime.Object, error) {\n\tif runHooks && e.AfterDelete != nil {\n\t\te.AfterDelete(obj, options)\n\t}\n\tif e.ReturnDeletedObject {\n\t\tif e.Decorator != nil {\n\t\t\te.Decorator(obj)\n\t\t}\n\t\treturn obj, nil\n\t}\n\t// Return information about the deleted object, which enables clients to\n\t// verify that the object was actually deleted and not waiting for finalizers.\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tdetails := &metav1.StatusDetails{\n\t\tName: accessor.GetName(),\n\t\tGroup: qualifiedResource.Group,\n\t\tKind: qualifiedResource.Resource, // Yes we set Kind field to resource.\n\t\tUID: accessor.GetUID(),\n\t}\n\tstatus := &metav1.Status{Status: metav1.StatusSuccess, 
Details: details}\n\treturn status, nil\n}\n\n// Watch makes a matcher for the given label and field, and calls\n// WatchPredicate. If possible, you should customize PredicateFunc to produce\n// a matcher that matches by key. SelectionPredicate does this for you\n// automatically.\nfunc (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tpredicate := e.PredicateFunc(label, field)\n\n\tresourceVersion := \"\"\n\tif options != nil {\n\t\tresourceVersion = options.ResourceVersion\n\t\tpredicate.AllowWatchBookmarks = options.AllowWatchBookmarks\n\t}\n\treturn e.WatchPredicate(ctx, predicate, resourceVersion)\n}\n\n// WatchPredicate starts a watch for the items that matches.\nfunc (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {\n\tstorageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\tw, err := e.Storage.Watch(ctx, key, storageOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif e.Decorator != nil {\n\t\t\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t\t\t}\n\t\t\treturn w, nil\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the\n\t\t// optimization is skipped\n\t}\n\n\tw, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t}\n\treturn w, nil\n}\n\n// calculateTTL is a helper for retrieving the updated TTL for an object or\n// returning an error if the TTL 
cannot be calculated. The defaultTTL is\n// changed to 1 if less than zero. Zero means no TTL, not expire immediately.\nfunc (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {\n\t// TODO: validate this is assertion is still valid.\n\n\t// etcd may return a negative TTL for a node if the expiration has not\n\t// occurred due to server lag - we will ensure that the value is at least\n\t// set.\n\tif defaultTTL < 0 {\n\t\tdefaultTTL = 1\n\t}\n\tttl = uint64(defaultTTL)\n\tif e.TTLFunc != nil {\n\t\tttl, err = e.TTLFunc(obj, ttl, update)\n\t}\n\treturn ttl, err\n}\n\n// CompleteWithOptions updates the store with the provided options and\n// defaults common fields.\nfunc (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {\n\tif e.DefaultQualifiedResource.Empty() {\n\t\treturn fmt.Errorf(\"store %#v must have a non-empty qualified resource\", e)\n\t}\n\tif e.NewFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif e.NewListFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewListFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif (e.KeyRootFunc == nil) != (e.KeyFunc == nil) {\n\t\treturn fmt.Errorf(\"store for %s must set both KeyRootFunc and KeyFunc or neither\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif e.TableConvertor == nil {\n\t\treturn fmt.Errorf(\"store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time\", e.DefaultQualifiedResource.String())\n\t}\n\n\tvar isNamespaced bool\n\tswitch {\n\tcase e.CreateStrategy != nil:\n\t\tisNamespaced = e.CreateStrategy.NamespaceScoped()\n\tcase e.UpdateStrategy != nil:\n\t\tisNamespaced = e.UpdateStrategy.NamespaceScoped()\n\tdefault:\n\t\treturn fmt.Errorf(\"store for %s must have CreateStrategy or UpdateStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif 
e.DeleteStrategy == nil {\n\t\treturn fmt.Errorf(\"store for %s must have DeleteStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif options.RESTOptions == nil {\n\t\treturn fmt.Errorf(\"options for %s must have RESTOptions set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tattrFunc := options.AttrFunc\n\tif attrFunc == nil {\n\t\tif isNamespaced {\n\t\t\tattrFunc = storage.DefaultNamespaceScopedAttr\n\t\t} else {\n\t\t\tattrFunc = storage.DefaultClusterScopedAttr\n\t\t}\n\t}\n\tif e.PredicateFunc == nil {\n\t\te.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {\n\t\t\treturn storage.SelectionPredicate{\n\t\t\t\tLabel: label,\n\t\t\t\tField: field,\n\t\t\t\tGetAttrs: attrFunc,\n\t\t\t}\n\t\t}\n\t}\n\n\terr := validateIndexers(options.Indexers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// ResourcePrefix must come from the underlying factory\n\tprefix := opts.ResourcePrefix\n\tif !strings.HasPrefix(prefix, \"/\") {\n\t\tprefix = \"/\" + prefix\n\t}\n\tif prefix == \"/\" {\n\t\treturn fmt.Errorf(\"store for %s has an invalid prefix %q\", e.DefaultQualifiedResource.String(), opts.ResourcePrefix)\n\t}\n\n\t// Set the default behavior for storage key generation\n\tif e.KeyRootFunc == nil && e.KeyFunc == nil {\n\t\tif isNamespaced {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn NamespaceKeyRootFunc(ctx, prefix)\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t} else {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn prefix\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// We adapt the store's keyFunc so that we can use it 
with the StorageDecorator\n\t// without making any assumptions about where objects are stored in etcd\n\tkeyFunc := func(obj runtime.Object) (string, error) {\n\t\taccessor, err := meta.Accessor(obj)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif isNamespaced {\n\t\t\treturn e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName())\n\t\t}\n\n\t\treturn e.KeyFunc(genericapirequest.NewContext(), accessor.GetName())\n\t}\n\n\tif e.DeleteCollectionWorkers == 0 {\n\t\te.DeleteCollectionWorkers = opts.DeleteCollectionWorkers\n\t}\n\n\te.EnableGarbageCollection = opts.EnableGarbageCollection\n\n\tif e.ObjectNameFunc == nil {\n\t\te.ObjectNameFunc = func(obj runtime.Object) (string, error) {\n\t\t\taccessor, err := meta.Accessor(obj)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn accessor.GetName(), nil\n\t\t}\n\t}\n\n\tif e.Storage.Storage == nil {\n\t\te.Storage.Codec = opts.StorageConfig.Codec\n\t\tvar err error\n\t\te.Storage.Storage, e.DestroyFunc, err = opts.Decorator(\n\t\t\topts.StorageConfig,\n\t\t\tprefix,\n\t\t\tkeyFunc,\n\t\t\te.NewFunc,\n\t\t\te.NewListFunc,\n\t\t\tattrFunc,\n\t\t\toptions.TriggerFunc,\n\t\t\toptions.Indexers,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.StorageVersioner = opts.StorageConfig.EncodeVersioner\n\n\t\tif opts.CountMetricPollPeriod > 0 {\n\t\t\tstopFunc := e.startObservingCount(opts.CountMetricPollPeriod)\n\t\t\tpreviousDestroy := e.DestroyFunc\n\t\t\te.DestroyFunc = func() {\n\t\t\t\tstopFunc()\n\t\t\t\tif previousDestroy != nil {\n\t\t\t\t\tpreviousDestroy()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// startObservingCount starts monitoring given prefix and periodically updating metrics. 
It returns a function to stop collection.\nfunc (e *Store) startObservingCount(period time.Duration) func() {\n\tprefix := e.KeyRootFunc(genericapirequest.NewContext())\n\tresourceName := e.DefaultQualifiedResource.String()\n\tklog.V(2).Infof(\"Monitoring %v count at <storage-prefix>/%v\", resourceName, prefix)\n\tstopCh := make(chan struct{})\n\tgo wait.JitterUntil(func() {\n\t\tcount, err := e.Storage.Count(prefix)\n\t\tif err != nil {\n\t\t\tklog.V(5).Infof(\"Failed to update storage count metric: %v\", err)\n\t\t\tmetrics.UpdateObjectCount(resourceName, -1)\n\t\t} else {\n\t\t\tmetrics.UpdateObjectCount(resourceName, count)\n\t\t}\n\t}, period, resourceCountPollPeriodJitter, true, stopCh)\n\treturn func() { close(stopCh) }\n}\n\nfunc (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {\n\tif e.TableConvertor != nil {\n\t\treturn e.TableConvertor.ConvertToTable(ctx, object, tableOptions)\n\t}\n\treturn rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions)\n}\n\nfunc (e *Store) StorageVersion() runtime.GroupVersioner {\n\treturn e.StorageVersioner\n}\n\n// GetResetFields implements rest.ResetFieldsStrategy\nfunc (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {\n\tif e.ResetFieldsStrategy == nil {\n\t\treturn nil\n\t}\n\treturn e.ResetFieldsStrategy.GetResetFields()\n}\n\n// validateIndexers will check the prefix of indexers.\nfunc validateIndexers(indexers *cache.Indexers) error {\n\tif indexers == nil {\n\t\treturn nil\n\t}\n\tfor indexName := range *indexers {\n\t\tif len(indexName) <= 2 || (indexName[:2] != \"l:\" && indexName[:2] != \"f:\") {\n\t\t\treturn fmt.Errorf(\"index must prefix with \\\"l:\\\" or \\\"f:\\\"\")\n\t\t}\n\t}\n\treturn nil\n}\n"
},
{
"file": "pkg/registry/generic/registry/store.go",
"description": "Finally it validates the object before moving on to store it in etcd.",
"line": 382,
"contents": "/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\t\"k8s.io/apimachinery/pkg/api/validation/path\"\n\tmetainternalversion \"k8s.io/apimachinery/pkg/apis/meta/internalversion\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/sets\"\n\t\"k8s.io/apimachinery/pkg/util/validation/field\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\tgenericapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n\t\"k8s.io/apiserver/pkg/registry/generic\"\n\t\"k8s.io/apiserver/pkg/registry/rest\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\tstoreerr \"k8s.io/apiserver/pkg/storage/errors\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/util/dryrun\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"sigs.k8s.io/structured-merge-diff/v4/fieldpath\"\n\n\t\"k8s.io/klog/v2\"\n)\n\n// FinishFunc is a function returned by Begin hooks to complete an operation.\ntype FinishFunc func(ctx context.Context, success bool)\n\n// AfterDeleteFunc is the type used for the 
Store.AfterDelete hook.\ntype AfterDeleteFunc func(obj runtime.Object, options *metav1.DeleteOptions)\n\n// BeginCreateFunc is the type used for the Store.BeginCreate hook.\ntype BeginCreateFunc func(ctx context.Context, obj runtime.Object, options *metav1.CreateOptions) (FinishFunc, error)\n\n// AfterCreateFunc is the type used for the Store.AfterCreate hook.\ntype AfterCreateFunc func(obj runtime.Object, options *metav1.CreateOptions)\n\n// BeginUpdateFunc is the type used for the Store.BeginUpdate hook.\ntype BeginUpdateFunc func(ctx context.Context, obj, old runtime.Object, options *metav1.UpdateOptions) (FinishFunc, error)\n\n// AfterUpdateFunc is the type used for the Store.AfterUpdate hook.\ntype AfterUpdateFunc func(obj runtime.Object, options *metav1.UpdateOptions)\n\n// GenericStore interface can be used for type assertions when we need to access the underlying strategies.\ntype GenericStore interface {\n\tGetCreateStrategy() rest.RESTCreateStrategy\n\tGetUpdateStrategy() rest.RESTUpdateStrategy\n\tGetDeleteStrategy() rest.RESTDeleteStrategy\n}\n\n// Store implements k8s.io/apiserver/pkg/registry/rest.StandardStorage. It's\n// intended to be embeddable and allows the consumer to implement any\n// non-generic functions that are required. This object is intended to be\n// copyable so that it can be used in different ways but share the same\n// underlying behavior.\n//\n// All fields are required unless specified.\n//\n// The intended use of this type is embedding within a Kind specific\n// RESTStorage implementation. This type provides CRUD semantics on a Kubelike\n// resource, handling details like conflict detection with ResourceVersion and\n// semantics. 
The RESTCreateStrategy, RESTUpdateStrategy, and\n// RESTDeleteStrategy are generic across all backends, and encapsulate logic\n// specific to the API.\n//\n// TODO: make the default exposed methods exactly match a generic RESTStorage\ntype Store struct {\n\t// NewFunc returns a new instance of the type this registry returns for a\n\t// GET of a single object, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object\n\tNewFunc func() runtime.Object\n\n\t// NewListFunc returns a new list of the type this registry; it is the\n\t// type returned when the resource is listed, e.g.:\n\t//\n\t// curl GET /apis/group/version/namespaces/my-ns/myresource\n\tNewListFunc func() runtime.Object\n\n\t// DefaultQualifiedResource is the pluralized name of the resource.\n\t// This field is used if there is no request info present in the context.\n\t// See qualifiedResourceFromContext for details.\n\tDefaultQualifiedResource schema.GroupResource\n\n\t// KeyRootFunc returns the root etcd key for this resource; should not\n\t// include trailing \"/\". This is used for operations that work on the\n\t// entire collection (listing and watching).\n\t//\n\t// KeyRootFunc and KeyFunc must be supplied together or not at all.\n\tKeyRootFunc func(ctx context.Context) string\n\n\t// KeyFunc returns the key for a specific object in the collection.\n\t// KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace'\n\t// can be gotten from ctx.\n\t//\n\t// KeyFunc and KeyRootFunc must be supplied together or not at all.\n\tKeyFunc func(ctx context.Context, name string) (string, error)\n\n\t// ObjectNameFunc returns the name of an object or an error.\n\tObjectNameFunc func(obj runtime.Object) (string, error)\n\n\t// TTLFunc returns the TTL (time to live) that objects should be persisted\n\t// with. The existing parameter is the current TTL or the default for this\n\t// operation. 
The update parameter indicates whether this is an operation\n\t// against an existing object.\n\t//\n\t// Objects that are persisted with a TTL are evicted once the TTL expires.\n\tTTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)\n\n\t// PredicateFunc returns a matcher corresponding to the provided labels\n\t// and fields. The SelectionPredicate returned should return true if the\n\t// object matches the given field and label selectors.\n\tPredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate\n\n\t// EnableGarbageCollection affects the handling of Update and Delete\n\t// requests. Enabling garbage collection allows finalizers to do work to\n\t// finalize this object before the store deletes it.\n\t//\n\t// If any store has garbage collection enabled, it must also be enabled in\n\t// the kube-controller-manager.\n\tEnableGarbageCollection bool\n\n\t// DeleteCollectionWorkers is the maximum number of workers in a single\n\t// DeleteCollection call. Delete requests for the items in a collection\n\t// are issued in parallel.\n\tDeleteCollectionWorkers int\n\n\t// Decorator is an optional exit hook on an object returned from the\n\t// underlying storage. The returned object could be an individual object\n\t// (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for\n\t// integrations that are above storage and should only be used for\n\t// specific cases where storage of the value is not appropriate, since\n\t// they cannot be watched.\n\tDecorator func(runtime.Object)\n\n\t// CreateStrategy implements resource-specific behavior during creation.\n\tCreateStrategy rest.RESTCreateStrategy\n\t// BeginCreate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterCreate and Decorator, indicating via the argument\n\t// whether the operation succeeded. 
If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginCreate BeginCreateFunc\n\t// AfterCreate implements a further operation to run after a resource is\n\t// created and before it is decorated, optional.\n\tAfterCreate AfterCreateFunc\n\n\t// UpdateStrategy implements resource-specific behavior during updates.\n\tUpdateStrategy rest.RESTUpdateStrategy\n\t// BeginUpdate is an optional hook that returns a \"transaction-like\"\n\t// commit/revert function which will be called at the end of the operation,\n\t// but before AfterUpdate and Decorator, indicating via the argument\n\t// whether the operation succeeded. If this returns an error, the function\n\t// is not called. Almost nobody should use this hook.\n\tBeginUpdate BeginUpdateFunc\n\t// AfterUpdate implements a further operation to run after a resource is\n\t// updated and before it is decorated, optional.\n\tAfterUpdate AfterUpdateFunc\n\n\t// DeleteStrategy implements resource-specific behavior during deletion.\n\tDeleteStrategy rest.RESTDeleteStrategy\n\t// AfterDelete implements a further operation to run after a resource is\n\t// deleted and before it is decorated, optional.\n\tAfterDelete AfterDeleteFunc\n\t// ReturnDeletedObject determines whether the Store returns the object\n\t// that was deleted. Otherwise, return a generic success status response.\n\tReturnDeletedObject bool\n\t// ShouldDeleteDuringUpdate is an optional function to determine whether\n\t// an update from existing to obj should result in a delete.\n\t// If specified, this is checked in addition to standard finalizer,\n\t// deletionTimestamp, and deletionGracePeriodSeconds checks.\n\tShouldDeleteDuringUpdate func(ctx context.Context, key string, obj, existing runtime.Object) bool\n\n\t// TableConvertor is an optional interface for transforming items or lists\n\t// of items into tabular output. 
If unset, the default will be used.\n\tTableConvertor rest.TableConvertor\n\n\t// ResetFieldsStrategy provides the fields reset by the strategy that\n\t// should not be modified by the user.\n\tResetFieldsStrategy rest.ResetFieldsStrategy\n\n\t// Storage is the interface for the underlying storage for the\n\t// resource. It is wrapped into a \"DryRunnableStorage\" that will\n\t// either pass-through or simply dry-run.\n\tStorage DryRunnableStorage\n\t// StorageVersioner outputs the <group/version/kind> an object will be\n\t// converted to before persisted in etcd, given a list of possible\n\t// kinds of the object.\n\t// If the StorageVersioner is nil, apiserver will leave the\n\t// storageVersionHash as empty in the discovery document.\n\tStorageVersioner runtime.GroupVersioner\n\t// Called to cleanup clients used by the underlying Storage; optional.\n\tDestroyFunc func()\n}\n\n// Note: the rest.StandardStorage interface aggregates the common REST verbs\nvar _ rest.StandardStorage = &Store{}\nvar _ rest.TableConvertor = &Store{}\nvar _ GenericStore = &Store{}\n\nconst (\n\tOptimisticLockErrorMsg = \"the object has been modified; please apply your changes to the latest version and try again\"\n\tresourceCountPollPeriodJitter = 1.2\n)\n\n// NamespaceKeyRootFunc is the default function for constructing storage paths\n// to resource directories enforcing namespace rules.\nfunc NamespaceKeyRootFunc(ctx context.Context, prefix string) string {\n\tkey := prefix\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif ok && len(ns) > 0 {\n\t\tkey = key + \"/\" + ns\n\t}\n\treturn key\n}\n\n// NamespaceKeyFunc is the default function for constructing storage paths to\n// a resource relative to the given prefix enforcing namespace rules. 
If the\n// context does not contain a namespace, it errors.\nfunc NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tkey := NamespaceKeyRootFunc(ctx, prefix)\n\tns, ok := genericapirequest.NamespaceFrom(ctx)\n\tif !ok || len(ns) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Namespace parameter required.\")\n\t}\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey = key + \"/\" + name\n\treturn key, nil\n}\n\n// NoNamespaceKeyFunc is the default function for constructing storage paths\n// to a resource relative to the given prefix without a namespace.\nfunc NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {\n\tif len(name) == 0 {\n\t\treturn \"\", apierrors.NewBadRequest(\"Name parameter required.\")\n\t}\n\tif msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {\n\t\treturn \"\", apierrors.NewBadRequest(fmt.Sprintf(\"Name parameter invalid: %q: %s\", name, strings.Join(msgs, \";\")))\n\t}\n\tkey := prefix + \"/\" + name\n\treturn key, nil\n}\n\n// New implements RESTStorage.New.\nfunc (e *Store) New() runtime.Object {\n\treturn e.NewFunc()\n}\n\n// NewList implements rest.Lister.\nfunc (e *Store) NewList() runtime.Object {\n\treturn e.NewListFunc()\n}\n\n// NamespaceScoped indicates whether the resource is namespaced\nfunc (e *Store) NamespaceScoped() bool {\n\tif e.CreateStrategy != nil {\n\t\treturn e.CreateStrategy.NamespaceScoped()\n\t}\n\tif e.UpdateStrategy != nil {\n\t\treturn e.UpdateStrategy.NamespaceScoped()\n\t}\n\n\tpanic(\"programmer error: no CRUD for resource, you're crazy, override NamespaceScoped too\")\n}\n\n// GetCreateStrategy implements GenericStore.\nfunc (e *Store) GetCreateStrategy() rest.RESTCreateStrategy 
{\n\treturn e.CreateStrategy\n}\n\n// GetUpdateStrategy implements GenericStore.\nfunc (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy {\n\treturn e.UpdateStrategy\n}\n\n// GetDeleteStrategy implements GenericStore.\nfunc (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy {\n\treturn e.DeleteStrategy\n}\n\n// List returns a list of items matching labels and field according to the\n// store's PredicateFunc.\nfunc (e *Store) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tout, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ListPredicate returns a list of all the items matching the given\n// SelectionPredicate.\nfunc (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif options == nil {\n\t\t// By default we should serve the request from etcd.\n\t\toptions = &metainternalversion.ListOptions{ResourceVersion: \"\"}\n\t}\n\tp.Limit = options.Limit\n\tp.Continue = options.Continue\n\tlist := e.NewListFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstorageOpts := storage.ListOptions{ResourceVersion: options.ResourceVersion, ResourceVersionMatch: options.ResourceVersionMatch, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\terr := e.Storage.GetToList(ctx, key, storageOpts, list)\n\t\t\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the optimization 
is skipped\n\t}\n\n\terr := e.Storage.List(ctx, e.KeyRootFunc(ctx), storageOpts, list)\n\treturn list, storeerr.InterpretListError(err, qualifiedResource)\n}\n\n// finishNothing is a do-nothing FinishFunc.\nfunc finishNothing(context.Context, bool) {}\n\n// Create inserts a new item according to the unique key from the object.\nfunc (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {\n\tvar finishCreate FinishFunc = finishNothing\n\n\tif e.BeginCreate != nil {\n\t\tfn, err := e.BeginCreate(ctx, obj, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfinishCreate = fn\n\t\tdefer func() {\n\t\t\tfinishCreate(ctx, false)\n\t\t}()\n\t}\n\n\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\treturn nil, err\n\t}\n\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t// handling chain wants to enforce.\n\tif createValidation != nil {\n\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tname, err := e.ObjectNameFunc(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tttl, err := e.calculateTTL(obj, 0, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := e.NewFunc()\n\tif err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {\n\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)\n\t\tif !apierrors.IsAlreadyExists(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tif errGet := e.Storage.Get(ctx, key, storage.GetOptions{}, out); errGet != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taccessor, errGetAcc := meta.Accessor(out)\n\t\tif errGetAcc != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\tif accessor.GetDeletionTimestamp() != nil {\n\t\t\tmsg := &err.(*apierrors.StatusError).ErrStatus.Message\n\t\t\t*msg = fmt.Sprintf(\"object is being deleted: %s\", *msg)\n\t\t}\n\t\treturn nil, err\n\t}\n\t// The operation has succeeded. Call the finish function if there is one,\n\t// and then make sure the defer doesn't call it again.\n\tfn := finishCreate\n\tfinishCreate = finishNothing\n\tfn(ctx, true)\n\n\tif e.AfterCreate != nil {\n\t\te.AfterCreate(out, options)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, nil\n}\n\n// ShouldDeleteDuringUpdate is the default function for\n// checking if an object should be deleted during an update.\n// It checks if the new object has no finalizers,\n// the existing object's deletionTimestamp is set, and\n// the existing object's deletionGracePeriodSeconds is 0 or nil\nfunc ShouldDeleteDuringUpdate(ctx context.Context, key string, obj, existing runtime.Object) bool {\n\tnewMeta, err := meta.Accessor(obj)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\toldMeta, err := meta.Accessor(existing)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\tif len(newMeta.GetFinalizers()) > 0 {\n\t\t// don't delete with finalizers remaining in the new object\n\t\treturn false\n\t}\n\tif oldMeta.GetDeletionTimestamp() == nil {\n\t\t// don't delete if the existing object hasn't had a delete request made\n\t\treturn false\n\t}\n\t// delete if the existing object has no grace period or a grace period of 0\n\treturn oldMeta.GetDeletionGracePeriodSeconds() == nil || *oldMeta.GetDeletionGracePeriodSeconds() == 0\n}\n\n// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list.\n// Used for objects that are either been finalized or have never initialized.\nfunc (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, options *metav1.DeleteOptions) 
(runtime.Object, bool, error) {\n\tout := e.NewFunc()\n\tklog.V(6).Infof(\"going to delete %s from registry, triggered by update\", name)\n\t// Using the rest.ValidateAllObjectFunc because the request is an UPDATE request and has already passed the admission for the UPDATE verb.\n\tif err := e.Storage.Delete(ctx, key, out, preconditions, rest.ValidateAllObjectFunc, dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Deletion is racy, i.e., there could be multiple update\n\t\t// requests to remove all finalizers from the object, so we\n\t\t// ignore the NotFound error.\n\t\tif storage.IsNotFound(err) {\n\t\t\t_, err := e.finalizeDelete(ctx, obj, true, options)\n\t\t\t// clients are expecting an updated object if a PUT succeeded,\n\t\t\t// but finalizeDelete returns a metav1.Status, so return\n\t\t\t// the object in the request instead.\n\t\t\treturn obj, false, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\t_, err := e.finalizeDelete(ctx, out, true, options)\n\t// clients are expecting an updated object if a PUT succeeded, but\n\t// finalizeDelete returns a metav1.Status, so return the object in\n\t// the request instead.\n\treturn obj, false, err\n}\n\n// Update performs an atomic update and set of the object. Returns the result of the update\n// or an error. 
If the registry allows create-on-update, the create flow will be executed.\n// A bool is returned along with the object and any errors, to indicate object creation.\nfunc (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar (\n\t\tcreatingObj runtime.Object\n\t\tcreating = false\n\t)\n\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tstoragePreconditions := &storage.Preconditions{}\n\tif preconditions := objInfo.Preconditions(); preconditions != nil {\n\t\tstoragePreconditions.UID = preconditions.UID\n\t\tstoragePreconditions.ResourceVersion = preconditions.ResourceVersion\n\t}\n\n\tout := e.NewFunc()\n\t// deleteObj is only used in case a deletion is carried out\n\tvar deleteObj runtime.Object\n\terr = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {\n\t\texistingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif existingResourceVersion == 0 {\n\t\t\tif !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate {\n\t\t\t\treturn nil, nil, apierrors.NewNotFound(qualifiedResource, name)\n\t\t\t}\n\t\t}\n\n\t\t// Given the existing object, get the new object\n\t\tobj, err := objInfo.UpdatedObject(ctx, existing)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// If AllowUnconditionalUpdate() is true and the object specified by\n\t\t// the user does not have a resource version, then we populate it with\n\t\t// the latest version. 
Else, we check that the version specified by\n\t\t// the user matches the version of latest storage object.\n\t\tnewResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tdoUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()\n\n\t\tif existingResourceVersion == 0 {\n\t\t\tvar finishCreate FinishFunc = finishNothing\n\n\t\t\tif e.BeginCreate != nil {\n\t\t\t\tfn, err := e.BeginCreate(ctx, obj, newCreateOptionsFromUpdateOptions(options))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t\tfinishCreate = fn\n\t\t\t\tdefer func() {\n\t\t\t\t\tfinishCreate(ctx, false)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\tcreating = true\n\t\t\tcreatingObj = obj\n\t\t\tif err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\t// at this point we have a fully formed object. It is time to call the validators that the apiserver\n\t\t\t// handling chain wants to enforce.\n\t\t\tif createValidation != nil {\n\t\t\t\tif err := createValidation(ctx, obj.DeepCopyObject()); err != nil {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tttl, err := e.calculateTTL(obj, 0, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\t// The operation has succeeded. 
Call the finish function if there is one,\n\t\t\t// and then make sure the defer doesn't call it again.\n\t\t\tfn := finishCreate\n\t\t\tfinishCreate = finishNothing\n\t\t\tfn(ctx, true)\n\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\n\t\tcreating = false\n\t\tcreatingObj = nil\n\t\tif doUnconditionalUpdate {\n\t\t\t// Update the object's resource version to match the latest\n\t\t\t// storage object's resource version.\n\t\t\terr = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// Check if the object's resource version matches the latest\n\t\t\t// resource version.\n\t\t\tif newResourceVersion == 0 {\n\t\t\t\t// TODO: The Invalid error should have a field for Resource.\n\t\t\t\t// After that field is added, we should fill the Resource and\n\t\t\t\t// leave the Kind field empty. See the discussion in #18526.\n\t\t\t\tqualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}\n\t\t\t\tfieldErrList := field.ErrorList{field.Invalid(field.NewPath(\"metadata\").Child(\"resourceVersion\"), newResourceVersion, \"must be specified for an update\")}\n\t\t\t\treturn nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList)\n\t\t\t}\n\t\t\tif newResourceVersion != existingResourceVersion {\n\t\t\t\treturn nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))\n\t\t\t}\n\t\t}\n\n\t\tvar finishUpdate FinishFunc = finishNothing\n\n\t\tif e.BeginUpdate != nil {\n\t\t\tfn, err := e.BeginUpdate(ctx, obj, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tfinishUpdate = fn\n\t\t\tdefer func() {\n\t\t\t\tfinishUpdate(ctx, false)\n\t\t\t}()\n\t\t}\n\n\t\tif err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t// at this point we have a fully formed object. 
It is time to call the validators that the apiserver\n\t\t// handling chain wants to enforce.\n\t\tif updateValidation != nil {\n\t\t\tif err := updateValidation(ctx, obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t\t// Check the default delete-during-update conditions, and store-specific conditions if provided\n\t\tif ShouldDeleteDuringUpdate(ctx, key, obj, existing) &&\n\t\t\t(e.ShouldDeleteDuringUpdate == nil || e.ShouldDeleteDuringUpdate(ctx, key, obj, existing)) {\n\t\t\tdeleteObj = obj\n\t\t\treturn nil, nil, errEmptiedFinalizers\n\t\t}\n\t\tttl, err := e.calculateTTL(obj, res.TTL, true)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// The operation has succeeded. Call the finish function if there is one,\n\t\t// and then make sure the defer doesn't call it again.\n\t\tfn := finishUpdate\n\t\tfinishUpdate = finishNothing\n\t\tfn(ctx, true)\n\n\t\tif int64(ttl) != res.TTL {\n\t\t\treturn obj, &ttl, nil\n\t\t}\n\t\treturn obj, nil, nil\n\t}, dryrun.IsDryRun(options.DryRun), nil)\n\n\tif err != nil {\n\t\t// delete the object\n\t\tif err == errEmptiedFinalizers {\n\t\t\treturn e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions, newDeleteOptionsFromUpdateOptions(options))\n\t\t}\n\t\tif creating {\n\t\t\terr = storeerr.InterpretCreateError(err, qualifiedResource, name)\n\t\t\terr = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)\n\t\t} else {\n\t\t\terr = storeerr.InterpretUpdateError(err, qualifiedResource, name)\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\tif creating {\n\t\tif e.AfterCreate != nil {\n\t\t\te.AfterCreate(out, newCreateOptionsFromUpdateOptions(options))\n\t\t}\n\t} else {\n\t\tif e.AfterUpdate != nil {\n\t\t\te.AfterUpdate(out, options)\n\t\t}\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(out)\n\t}\n\treturn out, creating, nil\n}\n\n// This is a helper to convert UpdateOptions to CreateOptions for the\n// create-on-update 
path.\nfunc newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions {\n\tco := &metav1.CreateOptions{\n\t\tDryRun: in.DryRun,\n\t\tFieldManager: in.FieldManager,\n\t}\n\tco.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"CreateOptions\"))\n\treturn co\n}\n\n// This is a helper to convert UpdateOptions to DeleteOptions for the\n// delete-on-update path.\nfunc newDeleteOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.DeleteOptions {\n\tdo := &metav1.DeleteOptions{\n\t\tDryRun: in.DryRun,\n\t}\n\tdo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind(\"DeleteOptions\"))\n\treturn do\n}\n\n// Get retrieves the item from storage.\nfunc (e *Store) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {\n\tobj := e.NewFunc()\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := e.Storage.Get(ctx, key, storage.GetOptions{ResourceVersion: options.ResourceVersion}, obj); err != nil {\n\t\treturn nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name)\n\t}\n\tif e.Decorator != nil {\n\t\te.Decorator(obj)\n\t}\n\treturn obj, nil\n}\n\n// qualifiedResourceFromContext attempts to retrieve a GroupResource from the context's request info.\n// If the context has no request info, DefaultQualifiedResource is used.\nfunc (e *Store) qualifiedResourceFromContext(ctx context.Context) schema.GroupResource {\n\tif info, ok := genericapirequest.RequestInfoFrom(ctx); ok {\n\t\treturn schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}\n\t}\n\t// some implementations access storage directly and thus the context has no RequestInfo\n\treturn e.DefaultQualifiedResource\n}\n\nvar (\n\terrAlreadyDeleting = fmt.Errorf(\"abort delete\")\n\terrDeleteNow = fmt.Errorf(\"delete now\")\n\terrEmptiedFinalizers = fmt.Errorf(\"emptied finalizers\")\n)\n\n// shouldOrphanDependents returns true if the finalizer for orphaning 
should be set\n// updated for FinalizerOrphanDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerOrphanDependents: options, existing finalizers of the object,\n// and e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldOrphanDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tgcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)\n\tvar defaultGCPolicy rest.GarbageCollectionPolicy\n\tif ok {\n\t\tdefaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx)\n\t}\n\n\tif defaultGCPolicy == rest.Unsupported {\n\t\t// return false to indicate that we should NOT orphan\n\t\treturn false\n\t}\n\n\t// An explicit policy was set at deletion time, that overrides everything\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\t//lint:ignore SA1019 backwards compatibility\n\t\treturn *options.OrphanDependents\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationOrphan:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation should make sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Get default orphan policy from this REST object type if it exists\n\treturn defaultGCPolicy == rest.OrphanDependents\n}\n\n// shouldDeleteDependents returns true if the finalizer for foreground deletion should be set\n// updated for 
FinalizerDeleteDependents. In the order of highest to lowest\n// priority, there are three factors affect whether to add/remove the\n// FinalizerDeleteDependents: options, existing finalizers of the object, and\n// e.DeleteStrategy.DefaultGarbageCollectionPolicy.\nfunc shouldDeleteDependents(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {\n\t// Get default GC policy from this REST object type\n\tif gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported {\n\t\t// return false to indicate that we should NOT delete in foreground\n\t\treturn false\n\t}\n\n\t// If an explicit policy was set at deletion time, that overrides both\n\t//lint:ignore SA1019 backwards compatibility\n\tif options != nil && options.OrphanDependents != nil {\n\t\treturn false\n\t}\n\tif options != nil && options.PropagationPolicy != nil {\n\t\tswitch *options.PropagationPolicy {\n\t\tcase metav1.DeletePropagationForeground:\n\t\t\treturn true\n\t\tcase metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan:\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If a finalizer is set in the object, it overrides the default\n\t// validation has made sure the two cases won't be true at the same time.\n\tfinalizers := accessor.GetFinalizers()\n\tfor _, f := range finalizers {\n\t\tswitch f {\n\t\tcase metav1.FinalizerDeleteDependents:\n\t\t\treturn true\n\t\tcase metav1.FinalizerOrphanDependents:\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn false\n}\n\n// deletionFinalizersForGarbageCollection analyzes the object and delete options\n// to determine whether the object is in need of finalization by the garbage\n// collector. 
If so, returns the set of deletion finalizers to apply and a bool\n// indicating whether the finalizer list has changed and is in need of updating.\n//\n// The finalizers returned are intended to be handled by the garbage collector.\n// If garbage collection is disabled for the store, this function returns false\n// to ensure finalizers aren't set which will never be cleared.\nfunc deletionFinalizersForGarbageCollection(ctx context.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {\n\tif !e.EnableGarbageCollection {\n\t\treturn false, []string{}\n\t}\n\tshouldOrphan := shouldOrphanDependents(ctx, e, accessor, options)\n\tshouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options)\n\tnewFinalizers := []string{}\n\n\t// first remove both finalizers, add them back if needed.\n\tfor _, f := range accessor.GetFinalizers() {\n\t\tif f == metav1.FinalizerOrphanDependents || f == metav1.FinalizerDeleteDependents {\n\t\t\tcontinue\n\t\t}\n\t\tnewFinalizers = append(newFinalizers, f)\n\t}\n\n\tif shouldOrphan {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents)\n\t}\n\tif shouldDeleteDependentInForeground {\n\t\tnewFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents)\n\t}\n\n\toldFinalizerSet := sets.NewString(accessor.GetFinalizers()...)\n\tnewFinalizersSet := sets.NewString(newFinalizers...)\n\tif oldFinalizerSet.Equal(newFinalizersSet) {\n\t\treturn false, accessor.GetFinalizers()\n\t}\n\treturn true, newFinalizers\n}\n\n// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the\n// DeletionTimestamp to \"now\" if there is no existing deletionTimestamp or if the existing\n// deletionTimestamp is further in future. 
Finalizers are watching for such updates and will\n// finalize the object if their IDs are present in the object's Finalizers list.\nfunc markAsDeleting(obj runtime.Object, now time.Time) (err error) {\n\tobjectMeta, kerr := meta.Accessor(obj)\n\tif kerr != nil {\n\t\treturn kerr\n\t}\n\t// This handles Generation bump for resources that don't support graceful\n\t// deletion. For resources that support graceful deletion is handle in\n\t// pkg/api/rest/delete.go\n\tif objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {\n\t\tobjectMeta.SetGeneration(objectMeta.GetGeneration() + 1)\n\t}\n\texistingDeletionTimestamp := objectMeta.GetDeletionTimestamp()\n\tif existingDeletionTimestamp == nil || existingDeletionTimestamp.After(now) {\n\t\tmetaNow := metav1.NewTime(now)\n\t\tobjectMeta.SetDeletionTimestamp(&metaNow)\n\t}\n\tvar zero int64 = 0\n\tobjectMeta.SetDeletionGracePeriodSeconds(&zero)\n\treturn nil\n}\n\n// updateForGracefulDeletionAndFinalizers updates the given object for\n// graceful deletion and finalization by setting the deletion timestamp and\n// grace period seconds (graceful deletion) and updating the list of\n// finalizers (finalization); it returns:\n//\n// 1. an error\n// 2. a boolean indicating that the object was not found, but it should be\n// ignored\n// 3. a boolean indicating that the object's grace period is exhausted and it\n// should be deleted immediately\n// 4. a new output object with the state that was updated\n// 5. 
a copy of the last existing state of the object\nfunc (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, deleteValidation rest.ValidateObjectFunc, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {\n\tlastGraceful := int64(0)\n\tvar pendingFinalizers bool\n\tout = e.NewFunc()\n\terr = e.Storage.GuaranteedUpdate(\n\t\tctx,\n\t\tkey,\n\t\tout,\n\t\tfalse, /* ignoreNotFound */\n\t\t&preconditions,\n\t\tstorage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {\n\t\t\tif err := deleteValidation(ctx, existing); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif pendingGraceful {\n\t\t\t\treturn nil, errAlreadyDeleting\n\t\t\t}\n\n\t\t\t// Add/remove the orphan finalizer as the options dictates.\n\t\t\t// Note that this occurs after checking pendingGraceufl, so\n\t\t\t// finalizers cannot be updated via DeleteOptions if deletion has\n\t\t\t// started.\n\t\t\texistingAccessor, err := meta.Accessor(existing)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tneedsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options)\n\t\t\tif needsUpdate {\n\t\t\t\texistingAccessor.SetFinalizers(newFinalizers)\n\t\t\t}\n\n\t\t\tpendingFinalizers = len(existingAccessor.GetFinalizers()) != 0\n\t\t\tif !graceful {\n\t\t\t\t// set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion\n\t\t\t\tif pendingFinalizers {\n\t\t\t\t\tklog.V(6).Infof(\"update the DeletionTimestamp to \\\"now\\\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers\", name)\n\t\t\t\t\terr = markAsDeleting(existing, time.Now())\n\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn existing, nil\n\t\t\t\t}\n\t\t\t\treturn nil, errDeleteNow\n\t\t\t}\n\t\t\tlastGraceful = *options.GracePeriodSeconds\n\t\t\tlastExisting = existing\n\t\t\treturn existing, nil\n\t\t}),\n\t\tdryrun.IsDryRun(options.DryRun),\n\t\tnil,\n\t)\n\tswitch err {\n\tcase nil:\n\t\t// If there are pending finalizers, we never delete the object immediately.\n\t\tif pendingFinalizers {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\tif lastGraceful > 0 {\n\t\t\treturn nil, false, false, out, lastExisting\n\t\t}\n\t\t// If we are here, the registry supports grace period mechanism and\n\t\t// we are intentionally delete gracelessly. In this case, we may\n\t\t// enter a race with other k8s components. If other component wins\n\t\t// the race, the object will not be found, and we should tolerate\n\t\t// the NotFound error. See\n\t\t// https://github.com/kubernetes/kubernetes/issues/19403 for\n\t\t// details.\n\t\treturn nil, true, true, out, lastExisting\n\tcase errDeleteNow:\n\t\t// we've updated the object to have a zero grace period, or it's already at 0, so\n\t\t// we should fall through and truly delete the object.\n\t\treturn nil, false, true, out, lastExisting\n\tcase errAlreadyDeleting:\n\t\tout, err = e.finalizeDelete(ctx, in, true, options)\n\t\treturn err, false, false, out, lastExisting\n\tdefault:\n\t\treturn storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting\n\t}\n}\n\n// Delete removes the item from storage.\nfunc (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {\n\tkey, err := e.KeyFunc(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tobj := e.NewFunc()\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tif err = e.Storage.Get(ctx, key, storage.GetOptions{}, obj); err != nil {\n\t\treturn nil, 
false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\n\t// support older consumers of delete by treating \"nil\" as delete immediately\n\tif options == nil {\n\t\toptions = metav1.NewDeleteOptions(0)\n\t}\n\tvar preconditions storage.Preconditions\n\tif options.Preconditions != nil {\n\t\tpreconditions.UID = options.Preconditions.UID\n\t\tpreconditions.ResourceVersion = options.Preconditions.ResourceVersion\n\t}\n\tgraceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\t// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending\n\tif pendingGraceful {\n\t\tout, err := e.finalizeDelete(ctx, obj, false, options)\n\t\treturn out, false, err\n\t}\n\t// check if obj has pending finalizers\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, false, apierrors.NewInternalError(err)\n\t}\n\tpendingFinalizers := len(accessor.GetFinalizers()) != 0\n\tvar ignoreNotFound bool\n\tvar deleteImmediately bool = true\n\tvar lastExisting, out runtime.Object\n\n\t// Handle combinations of graceful deletion and finalization by issuing\n\t// the correct updates.\n\tshouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options)\n\t// TODO: remove the check, because we support no-op updates now.\n\tif graceful || pendingFinalizers || shouldUpdateFinalizers {\n\t\terr, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, deleteValidation, obj)\n\t\t// Update the preconditions.ResourceVersion if set since we updated the object.\n\t\tif err == nil && deleteImmediately && preconditions.ResourceVersion != nil {\n\t\t\taccessor, err = meta.Accessor(out)\n\t\t\tif err != nil {\n\t\t\t\treturn out, false, apierrors.NewInternalError(err)\n\t\t\t}\n\t\t\tresourceVersion := 
accessor.GetResourceVersion()\n\t\t\tpreconditions.ResourceVersion = &resourceVersion\n\t\t}\n\t}\n\n\t// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.\n\tif !deleteImmediately || err != nil {\n\t\treturn out, false, err\n\t}\n\n\t// Going further in this function is not useful when we are\n\t// performing a dry-run request. Worse, it will actually\n\t// override \"out\" with the version of the object in database\n\t// that doesn't have the finalizer and deletiontimestamp set\n\t// (because the update above was dry-run too). If we already\n\t// have that version available, let's just return it now,\n\t// otherwise, we can call dry-run delete that will get us the\n\t// latest version of the object.\n\tif dryrun.IsDryRun(options.DryRun) && out != nil {\n\t\treturn out, true, nil\n\t}\n\n\t// delete immediately, or no graceful deletion supported\n\tklog.V(6).Infof(\"going to delete %s from registry: \", name)\n\tout = e.NewFunc()\n\tif err := e.Storage.Delete(ctx, key, out, &preconditions, storage.ValidateObjectFunc(deleteValidation), dryrun.IsDryRun(options.DryRun), nil); err != nil {\n\t\t// Please refer to the place where we set ignoreNotFound for the reason\n\t\t// why we ignore the NotFound error .\n\t\tif storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {\n\t\t\t// The lastExisting object may not be the last state of the object\n\t\t\t// before its deletion, but it's the best approximation.\n\t\t\tout, err := e.finalizeDelete(ctx, lastExisting, true, options)\n\t\t\treturn out, true, err\n\t\t}\n\t\treturn nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)\n\t}\n\tout, err = e.finalizeDelete(ctx, out, true, options)\n\treturn out, true, err\n}\n\n// DeleteReturnsDeletedObject implements the rest.MayReturnFullObjectDeleter interface\nfunc (e *Store) DeleteReturnsDeletedObject() bool {\n\treturn e.ReturnDeletedObject\n}\n\n// DeleteCollection removes all items returned by List with a 
given ListOptions from storage.\n//\n// DeleteCollection is currently NOT atomic. It can happen that only subset of objects\n// will be deleted from storage, and then an error will be returned.\n// In case of success, the list of deleted objects will be returned.\n//\n// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we\n// are removing all objects of a given type) with the current API (it's technically\n// possibly with storage API, but watch is not delivered correctly then).\n// It will be possible to fix it with v3 etcd API.\nfunc (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {\n\tif listOptions == nil {\n\t\tlistOptions = &metainternalversion.ListOptions{}\n\t} else {\n\t\tlistOptions = listOptions.DeepCopy()\n\t}\n\n\tlistObj, err := e.List(ctx, listOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titems, err := meta.ExtractList(listObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(items) == 0 {\n\t\t// Nothing to delete, return now\n\t\treturn listObj, nil\n\t}\n\t// Spawn a number of goroutines, so that we can issue requests to storage\n\t// in parallel to speed up deletion.\n\t// It is proportional to the number of items to delete, up to\n\t// DeleteCollectionWorkers (it doesn't make much sense to spawn 16\n\t// workers to delete 10 items).\n\tworkersNumber := e.DeleteCollectionWorkers\n\tif workersNumber > len(items) {\n\t\tworkersNumber = len(items)\n\t}\n\tif workersNumber < 1 {\n\t\tworkersNumber = 1\n\t}\n\twg := sync.WaitGroup{}\n\ttoProcess := make(chan int, 2*workersNumber)\n\terrs := make(chan error, workersNumber+1)\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\terrs <- fmt.Errorf(\"DeleteCollection distributor panicked: %v\", panicReason)\n\t\t})\n\t\tfor i := 0; i < len(items); i++ {\n\t\t\ttoProcess <- 
i\n\t\t}\n\t\tclose(toProcess)\n\t}()\n\n\twg.Add(workersNumber)\n\tfor i := 0; i < workersNumber; i++ {\n\t\tgo func() {\n\t\t\t// panics don't cross goroutine boundaries\n\t\t\tdefer utilruntime.HandleCrash(func(panicReason interface{}) {\n\t\t\t\terrs <- fmt.Errorf(\"DeleteCollection goroutine panicked: %v\", panicReason)\n\t\t\t})\n\t\t\tdefer wg.Done()\n\n\t\t\tfor index := range toProcess {\n\t\t\t\taccessor, err := meta.Accessor(items[index])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) {\n\t\t\t\t\tklog.V(4).Infof(\"Delete %s in DeleteCollection failed: %v\", accessor.GetName(), err)\n\t\t\t\t\terrs <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tselect {\n\tcase err := <-errs:\n\t\treturn nil, err\n\tdefault:\n\t\treturn listObj, nil\n\t}\n}\n\n// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and\n// returns the decorated deleted object if appropriate.\nfunc (e *Store) finalizeDelete(ctx context.Context, obj runtime.Object, runHooks bool, options *metav1.DeleteOptions) (runtime.Object, error) {\n\tif runHooks && e.AfterDelete != nil {\n\t\te.AfterDelete(obj, options)\n\t}\n\tif e.ReturnDeletedObject {\n\t\tif e.Decorator != nil {\n\t\t\te.Decorator(obj)\n\t\t}\n\t\treturn obj, nil\n\t}\n\t// Return information about the deleted object, which enables clients to\n\t// verify that the object was actually deleted and not waiting for finalizers.\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqualifiedResource := e.qualifiedResourceFromContext(ctx)\n\tdetails := &metav1.StatusDetails{\n\t\tName: accessor.GetName(),\n\t\tGroup: qualifiedResource.Group,\n\t\tKind: qualifiedResource.Resource, // Yes we set Kind field to resource.\n\t\tUID: accessor.GetUID(),\n\t}\n\tstatus := &metav1.Status{Status: metav1.StatusSuccess, 
Details: details}\n\treturn status, nil\n}\n\n// Watch makes a matcher for the given label and field, and calls\n// WatchPredicate. If possible, you should customize PredicateFunc to produce\n// a matcher that matches by key. SelectionPredicate does this for you\n// automatically.\nfunc (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {\n\tlabel := labels.Everything()\n\tif options != nil && options.LabelSelector != nil {\n\t\tlabel = options.LabelSelector\n\t}\n\tfield := fields.Everything()\n\tif options != nil && options.FieldSelector != nil {\n\t\tfield = options.FieldSelector\n\t}\n\tpredicate := e.PredicateFunc(label, field)\n\n\tresourceVersion := \"\"\n\tif options != nil {\n\t\tresourceVersion = options.ResourceVersion\n\t\tpredicate.AllowWatchBookmarks = options.AllowWatchBookmarks\n\t}\n\treturn e.WatchPredicate(ctx, predicate, resourceVersion)\n}\n\n// WatchPredicate starts a watch for the items that matches.\nfunc (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {\n\tstorageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p}\n\tif name, ok := p.MatchesSingle(); ok {\n\t\tif key, err := e.KeyFunc(ctx, name); err == nil {\n\t\t\tw, err := e.Storage.Watch(ctx, key, storageOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif e.Decorator != nil {\n\t\t\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t\t\t}\n\t\t\treturn w, nil\n\t\t}\n\t\t// if we cannot extract a key based on the current context, the\n\t\t// optimization is skipped\n\t}\n\n\tw, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), storageOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif e.Decorator != nil {\n\t\treturn newDecoratedWatcher(ctx, w, e.Decorator), nil\n\t}\n\treturn w, nil\n}\n\n// calculateTTL is a helper for retrieving the updated TTL for an object or\n// returning an error if the TTL 
cannot be calculated. The defaultTTL is\n// changed to 1 if less than zero. Zero means no TTL, not expire immediately.\nfunc (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {\n\t// TODO: validate this is assertion is still valid.\n\n\t// etcd may return a negative TTL for a node if the expiration has not\n\t// occurred due to server lag - we will ensure that the value is at least\n\t// set.\n\tif defaultTTL < 0 {\n\t\tdefaultTTL = 1\n\t}\n\tttl = uint64(defaultTTL)\n\tif e.TTLFunc != nil {\n\t\tttl, err = e.TTLFunc(obj, ttl, update)\n\t}\n\treturn ttl, err\n}\n\n// CompleteWithOptions updates the store with the provided options and\n// defaults common fields.\nfunc (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {\n\tif e.DefaultQualifiedResource.Empty() {\n\t\treturn fmt.Errorf(\"store %#v must have a non-empty qualified resource\", e)\n\t}\n\tif e.NewFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif e.NewListFunc == nil {\n\t\treturn fmt.Errorf(\"store for %s must have NewListFunc set\", e.DefaultQualifiedResource.String())\n\t}\n\tif (e.KeyRootFunc == nil) != (e.KeyFunc == nil) {\n\t\treturn fmt.Errorf(\"store for %s must set both KeyRootFunc and KeyFunc or neither\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif e.TableConvertor == nil {\n\t\treturn fmt.Errorf(\"store for %s must set TableConvertor; rest.NewDefaultTableConvertor(e.DefaultQualifiedResource) can be used to output just name/creation time\", e.DefaultQualifiedResource.String())\n\t}\n\n\tvar isNamespaced bool\n\tswitch {\n\tcase e.CreateStrategy != nil:\n\t\tisNamespaced = e.CreateStrategy.NamespaceScoped()\n\tcase e.UpdateStrategy != nil:\n\t\tisNamespaced = e.UpdateStrategy.NamespaceScoped()\n\tdefault:\n\t\treturn fmt.Errorf(\"store for %s must have CreateStrategy or UpdateStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif 
e.DeleteStrategy == nil {\n\t\treturn fmt.Errorf(\"store for %s must have DeleteStrategy set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tif options.RESTOptions == nil {\n\t\treturn fmt.Errorf(\"options for %s must have RESTOptions set\", e.DefaultQualifiedResource.String())\n\t}\n\n\tattrFunc := options.AttrFunc\n\tif attrFunc == nil {\n\t\tif isNamespaced {\n\t\t\tattrFunc = storage.DefaultNamespaceScopedAttr\n\t\t} else {\n\t\t\tattrFunc = storage.DefaultClusterScopedAttr\n\t\t}\n\t}\n\tif e.PredicateFunc == nil {\n\t\te.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {\n\t\t\treturn storage.SelectionPredicate{\n\t\t\t\tLabel: label,\n\t\t\t\tField: field,\n\t\t\t\tGetAttrs: attrFunc,\n\t\t\t}\n\t\t}\n\t}\n\n\terr := validateIndexers(options.Indexers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// ResourcePrefix must come from the underlying factory\n\tprefix := opts.ResourcePrefix\n\tif !strings.HasPrefix(prefix, \"/\") {\n\t\tprefix = \"/\" + prefix\n\t}\n\tif prefix == \"/\" {\n\t\treturn fmt.Errorf(\"store for %s has an invalid prefix %q\", e.DefaultQualifiedResource.String(), opts.ResourcePrefix)\n\t}\n\n\t// Set the default behavior for storage key generation\n\tif e.KeyRootFunc == nil && e.KeyFunc == nil {\n\t\tif isNamespaced {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn NamespaceKeyRootFunc(ctx, prefix)\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t} else {\n\t\t\te.KeyRootFunc = func(ctx context.Context) string {\n\t\t\t\treturn prefix\n\t\t\t}\n\t\t\te.KeyFunc = func(ctx context.Context, name string) (string, error) {\n\t\t\t\treturn NoNamespaceKeyFunc(ctx, prefix, name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// We adapt the store's keyFunc so that we can use it 
with the StorageDecorator\n\t// without making any assumptions about where objects are stored in etcd\n\tkeyFunc := func(obj runtime.Object) (string, error) {\n\t\taccessor, err := meta.Accessor(obj)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif isNamespaced {\n\t\t\treturn e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName())\n\t\t}\n\n\t\treturn e.KeyFunc(genericapirequest.NewContext(), accessor.GetName())\n\t}\n\n\tif e.DeleteCollectionWorkers == 0 {\n\t\te.DeleteCollectionWorkers = opts.DeleteCollectionWorkers\n\t}\n\n\te.EnableGarbageCollection = opts.EnableGarbageCollection\n\n\tif e.ObjectNameFunc == nil {\n\t\te.ObjectNameFunc = func(obj runtime.Object) (string, error) {\n\t\t\taccessor, err := meta.Accessor(obj)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn accessor.GetName(), nil\n\t\t}\n\t}\n\n\tif e.Storage.Storage == nil {\n\t\te.Storage.Codec = opts.StorageConfig.Codec\n\t\tvar err error\n\t\te.Storage.Storage, e.DestroyFunc, err = opts.Decorator(\n\t\t\topts.StorageConfig,\n\t\t\tprefix,\n\t\t\tkeyFunc,\n\t\t\te.NewFunc,\n\t\t\te.NewListFunc,\n\t\t\tattrFunc,\n\t\t\toptions.TriggerFunc,\n\t\t\toptions.Indexers,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.StorageVersioner = opts.StorageConfig.EncodeVersioner\n\n\t\tif opts.CountMetricPollPeriod > 0 {\n\t\t\tstopFunc := e.startObservingCount(opts.CountMetricPollPeriod)\n\t\t\tpreviousDestroy := e.DestroyFunc\n\t\t\te.DestroyFunc = func() {\n\t\t\t\tstopFunc()\n\t\t\t\tif previousDestroy != nil {\n\t\t\t\t\tpreviousDestroy()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// startObservingCount starts monitoring given prefix and periodically updating metrics. 
It returns a function to stop collection.\nfunc (e *Store) startObservingCount(period time.Duration) func() {\n\tprefix := e.KeyRootFunc(genericapirequest.NewContext())\n\tresourceName := e.DefaultQualifiedResource.String()\n\tklog.V(2).Infof(\"Monitoring %v count at <storage-prefix>/%v\", resourceName, prefix)\n\tstopCh := make(chan struct{})\n\tgo wait.JitterUntil(func() {\n\t\tcount, err := e.Storage.Count(prefix)\n\t\tif err != nil {\n\t\t\tklog.V(5).Infof(\"Failed to update storage count metric: %v\", err)\n\t\t\tmetrics.UpdateObjectCount(resourceName, -1)\n\t\t} else {\n\t\t\tmetrics.UpdateObjectCount(resourceName, count)\n\t\t}\n\t}, period, resourceCountPollPeriodJitter, true, stopCh)\n\treturn func() { close(stopCh) }\n}\n\nfunc (e *Store) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {\n\tif e.TableConvertor != nil {\n\t\treturn e.TableConvertor.ConvertToTable(ctx, object, tableOptions)\n\t}\n\treturn rest.NewDefaultTableConvertor(e.DefaultQualifiedResource).ConvertToTable(ctx, object, tableOptions)\n}\n\nfunc (e *Store) StorageVersion() runtime.GroupVersioner {\n\treturn e.StorageVersioner\n}\n\n// GetResetFields implements rest.ResetFieldsStrategy\nfunc (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {\n\tif e.ResetFieldsStrategy == nil {\n\t\treturn nil\n\t}\n\treturn e.ResetFieldsStrategy.GetResetFields()\n}\n\n// validateIndexers will check the prefix of indexers.\nfunc validateIndexers(indexers *cache.Indexers) error {\n\tif indexers == nil {\n\t\treturn nil\n\t}\n\tfor indexName := range *indexers {\n\t\tif len(indexName) <= 2 || (indexName[:2] != \"l:\" && indexName[:2] != \"f:\") {\n\t\t\treturn fmt.Errorf(\"index must prefix with \\\"l:\\\" or \\\"f:\\\"\")\n\t\t}\n\t}\n\treturn nil\n}\n"
},
{
"directory": "pkg/storage/etcd3",
"description": "Now let's look at the etcd3 directory to understand how the object is finally stored."
},
{
"file": "pkg/storage/etcd3/store.go",
"description": "Let's look at the `Create` method!",
"line": 143,
"contents": "/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage etcd3\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go.etcd.io/etcd/clientv3\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/conversion\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/storage/value\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/klog/v2\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\n// authenticatedDataString satisfies the value.Context interface. It uses the key to\n// authenticate the stored data. This does not defend against reuse of previously\n// encrypted values under the same key, but will prevent an attacker from using an\n// encrypted value from a different key. 
A stronger authenticated data segment would\n// include the etcd3 Version field (which is incremented on each write to a key and\n// reset when the key is deleted), but an attacker with write access to etcd can\n// force deletion and recreation of keys to weaken that angle.\ntype authenticatedDataString string\n\n// AuthenticatedData implements the value.Context interface.\nfunc (d authenticatedDataString) AuthenticatedData() []byte {\n\treturn []byte(string(d))\n}\n\nvar _ value.Context = authenticatedDataString(\"\")\n\ntype store struct {\n\tclient *clientv3.Client\n\tcodec runtime.Codec\n\tversioner storage.Versioner\n\ttransformer value.Transformer\n\tpathPrefix string\n\twatcher *watcher\n\tpagingEnabled bool\n\tleaseManager *leaseManager\n}\n\ntype objState struct {\n\tobj runtime.Object\n\tmeta *storage.ResponseMeta\n\trev int64\n\tdata []byte\n\tstale bool\n}\n\n// New returns an etcd3 implementation of storage.Interface.\nfunc New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface {\n\treturn newStore(c, codec, newFunc, prefix, transformer, pagingEnabled, leaseManagerConfig)\n}\n\nfunc newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store {\n\tversioner := APIObjectVersioner{}\n\tresult := &store{\n\t\tclient: c,\n\t\tcodec: codec,\n\t\tversioner: versioner,\n\t\ttransformer: transformer,\n\t\tpagingEnabled: pagingEnabled,\n\t\t// for compatibility with etcd2 impl.\n\t\t// no-op for default prefix of '/registry'.\n\t\t// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'\n\t\tpathPrefix: path.Join(\"/\", prefix),\n\t\twatcher: newWatcher(c, codec, newFunc, versioner, transformer),\n\t\tleaseManager: newDefaultLeaseManager(c, 
leaseManagerConfig),\n\t}\n\treturn result\n}\n\n// Versioner implements storage.Interface.Versioner.\nfunc (s *store) Versioner() storage.Versioner {\n\treturn s.versioner\n}\n\n// Get implements storage.Interface.Get.\nfunc (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(ctx, key)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif opts.IgnoreNotFound {\n\t\t\treturn runtime.SetZeroValue(out)\n\t\t}\n\t\treturn storage.NewKeyNotFoundError(key, 0)\n\t}\n\tkv := getResp.Kvs[0]\n\n\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\treturn decode(s.codec, s.versioner, data, out, kv.ModRevision)\n}\n\n// Create implements storage.Interface.Create.\nfunc (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {\n\tif version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {\n\t\treturn errors.New(\"resourceVersion should not be set on objects to be created\")\n\t}\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tdata, err := runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\topts, err := s.ttlOpts(ctx, int64(ttl))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\tstartTime := 
time.Now()\n\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\tnotFound(key),\n\t).Then(\n\t\tclientv3.OpPut(key, string(newData), opts...),\n\t).Commit()\n\tmetrics.RecordEtcdRequestLatency(\"create\", getTypeName(obj), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !txnResp.Succeeded {\n\t\treturn storage.NewKeyExistsError(key, 0)\n\t}\n\n\tif out != nil {\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\t\treturn decode(s.codec, s.versioner, data, out, putResp.Header.Revision)\n\t}\n\treturn nil\n}\n\n// Delete implements storage.Interface.Delete.\nfunc (s *store) Delete(\n\tctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)\n}\n\nfunc (s *store) conditionalDelete(\n\tctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, false)\n\t}\n\n\tvar origState *objState\n\tvar err error\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tif preconditions != nil {\n\t\t\tif err := 
preconditions.Check(key, origState.obj); err != nil {\n\t\t\t\tif origStateIsCurrent {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// It's possible we're working with stale data.\n\t\t\t\t// Actually fetch\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\t// Retry\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := validateDeletion(ctx, origState.obj); err != nil {\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we're working with stale data.\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpDelete(key),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"delete\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"deletion of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t}\n}\n\n// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\nfunc (s *store) GuaranteedUpdate(\n\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", 
getTypeName(out)})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n\t}\n\n\tvar origState *objState\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\ttrace.Step(\"initial value restored\")\n\n\ttransformContext := authenticatedDataString(key)\n\tfor {\n\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n\t\tif err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tdata, err := runtime.Encode(s.codec, ret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !origState.stale && bytes.Equal(data, 
origState.data) {\n\t\t\t// if we skipped the original Get in this loop, we must refresh from\n\t\t\t// etcd in order to be sure the data in the store is equivalent to\n\t\t\t// our desired serialization\n\t\t\tif !origStateIsCurrent {\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\tif !bytes.Equal(data, origState.data) {\n\t\t\t\t\t// original data changed, restart loop\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// recheck that the data from etcd is not stale before short-circuiting a write\n\t\t\tif !origState.stale {\n\t\t\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t\t\t}\n\t\t}\n\n\t\tnewData, err := s.transformer.TransformToStorage(data, transformContext)\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\n\t\topts, err := s.ttlOpts(ctx, int64(ttl))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction prepared\")\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpPut(key, string(newData), opts...),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"update\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction committed\")\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"GuaranteedUpdate of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, ignoreNotFound)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttrace.Step(\"Retry value restored\")\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\n\t\treturn decode(s.codec, s.versioner, data, 
out, putResp.Header.Revision)\n\t}\n}\n\n// GetToList implements storage.Interface.GetToList.\nfunc (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := listOpts.ResourceVersion\n\tmatch := listOpts.ResourceVersionMatch\n\tpred := listOpts.Predicate\n\ttrace := utiltrace.New(\"GetToList etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tvar opts []clientv3.OpOption\n\tif len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {\n\t\trv, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\topts = append(opts, clientv3.WithRev(int64(rv)))\n\t}\n\n\tgetResp, err := s.client.KV.Get(ctx, key, opts...)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(listPtr), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) > 0 {\n\t\tdata, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\t\tif err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, 
s.versioner, newItemFunc); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// update version with cluster level revision\n\treturn s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), \"\", nil)\n}\n\nfunc getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {\n\t// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items\n\tif unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {\n\t\tif apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {\n\t\t\treturn func() runtime.Object {\n\t\t\t\treturn &unstructured.Unstructured{Object: map[string]interface{}{\"apiVersion\": apiVersion}}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Otherwise just instantiate an empty item\n\telem := v.Type().Elem()\n\treturn func() runtime.Object {\n\t\treturn reflect.New(elem).Interface().(runtime.Object)\n\t}\n}\n\nfunc (s *store) Count(key string) (int64, error) {\n\tkey = path.Join(s.pathPrefix, key)\n\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())\n\tmetrics.RecordEtcdRequestLatency(\"listWithCount\", key, startTime)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getResp.Count, nil\n}\n\n// continueToken is a simple structured object for encoding the state of a continue token.\n// TODO: if we change the version of the encoded from, we can't start encoding the new version\n// until all other servers are upgraded (i.e. 
we need to support rolling schema)\n// This is a public API struct and cannot change.\ntype continueToken struct {\n\tAPIVersion string `json:\"v\"`\n\tResourceVersion int64 `json:\"rv\"`\n\tStartKey string `json:\"start\"`\n}\n\n// parseFrom transforms an encoded predicate from into a versioned struct.\n// TODO: return a typed error that instructs clients that they must relist\nfunc decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {\n\tdata, err := base64.RawURLEncoding.DecodeString(continueValue)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tvar c continueToken\n\tif err := json.Unmarshal(data, &c); err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tswitch c.APIVersion {\n\tcase \"meta.k8s.io/v1\":\n\t\tif c.ResourceVersion == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)\")\n\t\t}\n\t\tif len(c.StartKey) == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: encoded start key empty (version meta.k8s.io/v1)\")\n\t\t}\n\t\t// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot\n\t\t// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with\n\t\t// continue start key that is fully qualified and cannot range over anything less specific than\n\t\t// keyPrefix.\n\t\tkey := c.StartKey\n\t\tif !strings.HasPrefix(key, \"/\") {\n\t\t\tkey = \"/\" + key\n\t\t}\n\t\tcleaned := path.Clean(key)\n\t\tif cleaned != key {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %s\", c.StartKey)\n\t\t}\n\t\treturn keyPrefix + cleaned[1:], c.ResourceVersion, nil\n\tdefault:\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: server does not recognize this encoded version %q\", c.APIVersion)\n\t}\n}\n\n// encodeContinue returns a string representing the 
encoded continuation of the current query.\nfunc encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {\n\tnextKey := strings.TrimPrefix(key, keyPrefix)\n\tif nextKey == key {\n\t\treturn \"\", fmt.Errorf(\"unable to encode next field: the key and key prefix do not match\")\n\t}\n\tout, err := json.Marshal(&continueToken{APIVersion: \"meta.k8s.io/v1\", ResourceVersion: resourceVersion, StartKey: nextKey})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(out), nil\n}\n\n// List implements storage.Interface.List.\nfunc (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := opts.ResourceVersion\n\tmatch := opts.ResourceVersionMatch\n\tpred := opts.Predicate\n\ttrace := utiltrace.New(\"List etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tif s.pathPrefix != \"\" {\n\t\tkey = path.Join(s.pathPrefix, key)\n\t}\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. 
if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\tkeyPrefix := key\n\n\t// set the appropriate clientv3 options to filter the returned data set\n\tvar paging bool\n\toptions := make([]clientv3.OpOption, 0, 4)\n\tif s.pagingEnabled && pred.Limit > 0 {\n\t\tpaging = true\n\t\toptions = append(options, clientv3.WithLimit(pred.Limit))\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tvar fromRV *uint64\n\tif len(resourceVersion) > 0 {\n\t\tparsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\tfromRV = &parsedRV\n\t}\n\n\tvar returnedRV, continueRV, withRev int64\n\tvar continueKey string\n\tswitch {\n\tcase s.pagingEnabled && len(pred.Continue) > 0:\n\t\tcontinueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid continue token: %v\", err))\n\t\t}\n\n\t\tif len(resourceVersion) > 0 && resourceVersion != \"0\" {\n\t\t\treturn apierrors.NewBadRequest(\"specifying resource version is not allowed when using continue\")\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\t\tkey = continueKey\n\n\t\t// If continueRV > 0, the LIST request needs a specific resource version.\n\t\t// continueRV==0 is invalid.\n\t\t// If continueRV < 0, the request is for the latest resource version.\n\t\tif continueRV > 0 {\n\t\t\twithRev = continueRV\n\t\t\treturnedRV = continueRV\n\t\t}\n\tcase s.pagingEnabled && pred.Limit > 0:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a 
response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\t\tif *fromRV > 0 {\n\t\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\t\twithRev = returnedRV\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\tdefault:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\toptions = append(options, clientv3.WithPrefix())\n\t}\n\tif withRev != 0 {\n\t\toptions = append(options, clientv3.WithRev(withRev))\n\t}\n\n\t// loop until we have filled the requested limit from etcd or there are no more results\n\tvar lastKey []byte\n\tvar hasMore bool\n\tvar getResp *clientv3.GetResponse\n\tfor {\n\t\tstartTime := time.Now()\n\t\tgetResp, err = s.client.KV.Get(ctx, key, options...)\n\t\tmetrics.RecordEtcdRequestLatency(\"list\", getTypeName(listPtr), startTime)\n\t\tif err != nil {\n\t\t\treturn interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)\n\t\t}\n\t\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\thasMore = getResp.More\n\n\t\tif len(getResp.Kvs) == 0 && getResp.More {\n\t\t\treturn fmt.Errorf(\"no results were 
found, but etcd indicated there were more values remaining\")\n\t\t}\n\n\t\t// avoid small allocations for the result slice, since this can be called in many\n\t\t// different contexts and we don't know how significantly the result will be filtered\n\t\tif pred.Empty() {\n\t\t\tgrowSlice(v, len(getResp.Kvs))\n\t\t} else {\n\t\t\tgrowSlice(v, 2048, len(getResp.Kvs))\n\t\t}\n\n\t\t// take items from the response until the bucket is full, filtering as we go\n\t\tfor _, kv := range getResp.Kvs {\n\t\t\tif paging && int64(v.Len()) >= pred.Limit {\n\t\t\t\thasMore = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastKey = kv.Key\n\n\t\t\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))\n\t\t\tif err != nil {\n\t\t\t\treturn storage.NewInternalErrorf(\"unable to transform key %q: %v\", kv.Key, err)\n\t\t\t}\n\n\t\t\tif err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// indicate to the client which resource version was returned\n\t\tif returnedRV == 0 {\n\t\t\treturnedRV = getResp.Header.Revision\n\t\t}\n\n\t\t// no more results remain or we didn't request paging\n\t\tif !hasMore || !paging {\n\t\t\tbreak\n\t\t}\n\t\t// we're paging but we have filled our bucket\n\t\tif int64(v.Len()) >= pred.Limit {\n\t\t\tbreak\n\t\t}\n\t\tkey = string(lastKey) + \"\\x00\"\n\t\tif withRev == 0 {\n\t\t\twithRev = returnedRV\n\t\t\toptions = append(options, clientv3.WithRev(withRev))\n\t\t}\n\t}\n\n\t// instruct the client to begin querying from immediately after the last key we returned\n\t// we never return a key that the client wouldn't be allowed to see\n\tif hasMore {\n\t\t// we want to start immediately after the last key\n\t\tnext, err := encodeContinue(string(lastKey)+\"\\x00\", keyPrefix, returnedRV)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar remainingItemCount *int64\n\t\t// getResp.Count counts in objects that do not match the 
pred.\n\t\t// Instead of returning inaccurate count for non-empty selectors, we return nil.\n\t\t// Only set remainingItemCount if the predicate is empty.\n\t\tif utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {\n\t\t\tif pred.Empty() {\n\t\t\t\tc := int64(getResp.Count - pred.Limit)\n\t\t\t\tremainingItemCount = &c\n\t\t\t}\n\t\t}\n\t\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)\n\t}\n\n\t// no continuation\n\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), \"\", nil)\n}\n\n// growSlice takes a slice value and grows its capacity up\n// to the maximum of the passed sizes or maxCapacity, whichever\n// is smaller. Above maxCapacity decisions about allocation are left\n// to the Go runtime on append. This allows a caller to make an\n// educated guess about the potential size of the total list while\n// still avoiding overly aggressive initial allocation. If sizes\n// is empty maxCapacity will be used as the size to grow.\nfunc growSlice(v reflect.Value, maxCapacity int, sizes ...int) {\n\tcap := v.Cap()\n\tmax := cap\n\tfor _, size := range sizes {\n\t\tif size > max {\n\t\t\tmax = size\n\t\t}\n\t}\n\tif len(sizes) == 0 || max > maxCapacity {\n\t\tmax = maxCapacity\n\t}\n\tif max <= cap {\n\t\treturn\n\t}\n\tif v.Len() > 0 {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\treflect.Copy(extra, v)\n\t\tv.Set(extra)\n\t} else {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\tv.Set(extra)\n\t}\n}\n\n// Watch implements storage.Interface.Watch.\nfunc (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, false)\n}\n\n// WatchList implements storage.Interface.WatchList.\nfunc (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, true)\n}\n\nfunc (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, 
recursive bool) (watch.Interface, error) {\n\trev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)\n}\n\nfunc (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {\n\tstate := &objState{\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\tif u, ok := v.Addr().Interface().(runtime.Unstructured); ok {\n\t\tstate.obj = u.NewEmptyInstance()\n\t} else {\n\t\tstate.obj = reflect.New(v.Type()).Interface().(runtime.Object)\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif !ignoreNotFound {\n\t\t\treturn nil, storage.NewKeyNotFoundError(key, 0)\n\t\t}\n\t\tif err := runtime.SetZeroValue(state.obj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdata, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn nil, storage.NewInternalError(err.Error())\n\t\t}\n\t\tstate.rev = getResp.Kvs[0].ModRevision\n\t\tstate.meta.ResourceVersion = uint64(state.rev)\n\t\tstate.data = data\n\t\tstate.stale = stale\n\t\tif err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {\n\tstate := &objState{\n\t\tobj: obj,\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\trv, err := s.versioner.ObjectResourceVersion(obj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't get resource version: %v\", err)\n\t}\n\tstate.rev = int64(rv)\n\tstate.meta.ResourceVersion = uint64(state.rev)\n\n\t// Compute the serialized form - for that we need to temporarily clean\n\t// its resource version field (those are not stored in etcd).\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn 
nil, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tstate.data, err = runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {\n\tret, ttlPtr, err := userUpdate(st.obj, *st.meta)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif err := s.versioner.PrepareObjectForStorage(ret); err != nil {\n\t\treturn nil, 0, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tvar ttl uint64\n\tif ttlPtr != nil {\n\t\tttl = *ttlPtr\n\t}\n\treturn ret, ttl, nil\n}\n\n// ttlOpts returns client options based on given ttl.\n// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length\nfunc (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {\n\tif ttl == 0 {\n\t\treturn nil, nil\n\t}\n\tid, err := s.leaseManager.GetLease(ctx, ttl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []clientv3.OpOption{clientv3.WithLease(id)}, nil\n}\n\n// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is\n// greater than the most recent actualRevision available from storage.\nfunc (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {\n\tif minimumResourceVersion == \"\" {\n\t\treturn nil\n\t}\n\tminimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)\n\tif err != nil {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t}\n\t// Enforce the storage.Interface guarantee that the resource version of the returned data\n\t// \"will be at least 'resourceVersion'\".\n\tif minimumRV > actualRevision {\n\t\treturn 
storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)\n\t}\n\treturn nil\n}\n\n// decode decodes value of bytes into object. It will also set the object resource version to rev.\n// On success, objPtr would be set to the object.\nfunc decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {\n\tif _, err := conversion.EnforcePtr(objPtr); err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\t_, _, err := codec.Decode(value, nil, objPtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn nil\n}\n\n// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.\nfunc appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {\n\tobj, _, err := codec.Decode(data, nil, newItemFunc())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(obj, rev); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\tif matched, err := pred.Matches(obj); err == nil && matched {\n\t\tv.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))\n\t}\n\treturn nil\n}\n\nfunc notFound(key string) clientv3.Cmp {\n\treturn clientv3.Compare(clientv3.ModRevision(key), \"=\", 0)\n}\n\n// getTypeName returns type name of an object for reporting purposes.\nfunc getTypeName(obj interface{}) string {\n\treturn reflect.TypeOf(obj).String()\n}\n"
},
{
"file": "pkg/storage/etcd3/store.go",
"description": "The object is then converted to the storage version and encoded back to `[]byte`s.",
"line": 150,
"contents": "/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage etcd3\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go.etcd.io/etcd/clientv3\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/conversion\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/storage/value\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/klog/v2\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\n// authenticatedDataString satisfies the value.Context interface. It uses the key to\n// authenticate the stored data. This does not defend against reuse of previously\n// encrypted values under the same key, but will prevent an attacker from using an\n// encrypted value from a different key. 
A stronger authenticated data segment would\n// include the etcd3 Version field (which is incremented on each write to a key and\n// reset when the key is deleted), but an attacker with write access to etcd can\n// force deletion and recreation of keys to weaken that angle.\ntype authenticatedDataString string\n\n// AuthenticatedData implements the value.Context interface.\nfunc (d authenticatedDataString) AuthenticatedData() []byte {\n\treturn []byte(string(d))\n}\n\nvar _ value.Context = authenticatedDataString(\"\")\n\ntype store struct {\n\tclient *clientv3.Client\n\tcodec runtime.Codec\n\tversioner storage.Versioner\n\ttransformer value.Transformer\n\tpathPrefix string\n\twatcher *watcher\n\tpagingEnabled bool\n\tleaseManager *leaseManager\n}\n\ntype objState struct {\n\tobj runtime.Object\n\tmeta *storage.ResponseMeta\n\trev int64\n\tdata []byte\n\tstale bool\n}\n\n// New returns an etcd3 implementation of storage.Interface.\nfunc New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface {\n\treturn newStore(c, codec, newFunc, prefix, transformer, pagingEnabled, leaseManagerConfig)\n}\n\nfunc newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store {\n\tversioner := APIObjectVersioner{}\n\tresult := &store{\n\t\tclient: c,\n\t\tcodec: codec,\n\t\tversioner: versioner,\n\t\ttransformer: transformer,\n\t\tpagingEnabled: pagingEnabled,\n\t\t// for compatibility with etcd2 impl.\n\t\t// no-op for default prefix of '/registry'.\n\t\t// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'\n\t\tpathPrefix: path.Join(\"/\", prefix),\n\t\twatcher: newWatcher(c, codec, newFunc, versioner, transformer),\n\t\tleaseManager: newDefaultLeaseManager(c, 
leaseManagerConfig),\n\t}\n\treturn result\n}\n\n// Versioner implements storage.Interface.Versioner.\nfunc (s *store) Versioner() storage.Versioner {\n\treturn s.versioner\n}\n\n// Get implements storage.Interface.Get.\nfunc (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(ctx, key)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif opts.IgnoreNotFound {\n\t\t\treturn runtime.SetZeroValue(out)\n\t\t}\n\t\treturn storage.NewKeyNotFoundError(key, 0)\n\t}\n\tkv := getResp.Kvs[0]\n\n\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\treturn decode(s.codec, s.versioner, data, out, kv.ModRevision)\n}\n\n// Create implements storage.Interface.Create.\nfunc (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {\n\tif version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {\n\t\treturn errors.New(\"resourceVersion should not be set on objects to be created\")\n\t}\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tdata, err := runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\topts, err := s.ttlOpts(ctx, int64(ttl))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\tstartTime := 
time.Now()\n\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\tnotFound(key),\n\t).Then(\n\t\tclientv3.OpPut(key, string(newData), opts...),\n\t).Commit()\n\tmetrics.RecordEtcdRequestLatency(\"create\", getTypeName(obj), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !txnResp.Succeeded {\n\t\treturn storage.NewKeyExistsError(key, 0)\n\t}\n\n\tif out != nil {\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\t\treturn decode(s.codec, s.versioner, data, out, putResp.Header.Revision)\n\t}\n\treturn nil\n}\n\n// Delete implements storage.Interface.Delete.\nfunc (s *store) Delete(\n\tctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)\n}\n\nfunc (s *store) conditionalDelete(\n\tctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, false)\n\t}\n\n\tvar origState *objState\n\tvar err error\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tif preconditions != nil {\n\t\t\tif err := 
preconditions.Check(key, origState.obj); err != nil {\n\t\t\t\tif origStateIsCurrent {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// It's possible we're working with stale data.\n\t\t\t\t// Actually fetch\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\t// Retry\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := validateDeletion(ctx, origState.obj); err != nil {\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we're working with stale data.\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpDelete(key),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"delete\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"deletion of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t}\n}\n\n// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\nfunc (s *store) GuaranteedUpdate(\n\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", 
getTypeName(out)})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n\t}\n\n\tvar origState *objState\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\ttrace.Step(\"initial value restored\")\n\n\ttransformContext := authenticatedDataString(key)\n\tfor {\n\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n\t\tif err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tdata, err := runtime.Encode(s.codec, ret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !origState.stale && bytes.Equal(data, 
origState.data) {\n\t\t\t// if we skipped the original Get in this loop, we must refresh from\n\t\t\t// etcd in order to be sure the data in the store is equivalent to\n\t\t\t// our desired serialization\n\t\t\tif !origStateIsCurrent {\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\tif !bytes.Equal(data, origState.data) {\n\t\t\t\t\t// original data changed, restart loop\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// recheck that the data from etcd is not stale before short-circuiting a write\n\t\t\tif !origState.stale {\n\t\t\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t\t\t}\n\t\t}\n\n\t\tnewData, err := s.transformer.TransformToStorage(data, transformContext)\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\n\t\topts, err := s.ttlOpts(ctx, int64(ttl))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction prepared\")\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpPut(key, string(newData), opts...),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"update\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction committed\")\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"GuaranteedUpdate of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, ignoreNotFound)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttrace.Step(\"Retry value restored\")\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\n\t\treturn decode(s.codec, s.versioner, data, 
out, putResp.Header.Revision)\n\t}\n}\n\n// GetToList implements storage.Interface.GetToList.\nfunc (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := listOpts.ResourceVersion\n\tmatch := listOpts.ResourceVersionMatch\n\tpred := listOpts.Predicate\n\ttrace := utiltrace.New(\"GetToList etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tvar opts []clientv3.OpOption\n\tif len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {\n\t\trv, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\topts = append(opts, clientv3.WithRev(int64(rv)))\n\t}\n\n\tgetResp, err := s.client.KV.Get(ctx, key, opts...)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(listPtr), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) > 0 {\n\t\tdata, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\t\tif err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, 
s.versioner, newItemFunc); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// update version with cluster level revision\n\treturn s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), \"\", nil)\n}\n\nfunc getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {\n\t// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items\n\tif unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {\n\t\tif apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {\n\t\t\treturn func() runtime.Object {\n\t\t\t\treturn &unstructured.Unstructured{Object: map[string]interface{}{\"apiVersion\": apiVersion}}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Otherwise just instantiate an empty item\n\telem := v.Type().Elem()\n\treturn func() runtime.Object {\n\t\treturn reflect.New(elem).Interface().(runtime.Object)\n\t}\n}\n\nfunc (s *store) Count(key string) (int64, error) {\n\tkey = path.Join(s.pathPrefix, key)\n\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())\n\tmetrics.RecordEtcdRequestLatency(\"listWithCount\", key, startTime)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getResp.Count, nil\n}\n\n// continueToken is a simple structured object for encoding the state of a continue token.\n// TODO: if we change the version of the encoded from, we can't start encoding the new version\n// until all other servers are upgraded (i.e. 
we need to support rolling schema)\n// This is a public API struct and cannot change.\ntype continueToken struct {\n\tAPIVersion string `json:\"v\"`\n\tResourceVersion int64 `json:\"rv\"`\n\tStartKey string `json:\"start\"`\n}\n\n// parseFrom transforms an encoded predicate from into a versioned struct.\n// TODO: return a typed error that instructs clients that they must relist\nfunc decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {\n\tdata, err := base64.RawURLEncoding.DecodeString(continueValue)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tvar c continueToken\n\tif err := json.Unmarshal(data, &c); err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tswitch c.APIVersion {\n\tcase \"meta.k8s.io/v1\":\n\t\tif c.ResourceVersion == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)\")\n\t\t}\n\t\tif len(c.StartKey) == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: encoded start key empty (version meta.k8s.io/v1)\")\n\t\t}\n\t\t// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot\n\t\t// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with\n\t\t// continue start key that is fully qualified and cannot range over anything less specific than\n\t\t// keyPrefix.\n\t\tkey := c.StartKey\n\t\tif !strings.HasPrefix(key, \"/\") {\n\t\t\tkey = \"/\" + key\n\t\t}\n\t\tcleaned := path.Clean(key)\n\t\tif cleaned != key {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %s\", c.StartKey)\n\t\t}\n\t\treturn keyPrefix + cleaned[1:], c.ResourceVersion, nil\n\tdefault:\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: server does not recognize this encoded version %q\", c.APIVersion)\n\t}\n}\n\n// encodeContinue returns a string representing the 
encoded continuation of the current query.\nfunc encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {\n\tnextKey := strings.TrimPrefix(key, keyPrefix)\n\tif nextKey == key {\n\t\treturn \"\", fmt.Errorf(\"unable to encode next field: the key and key prefix do not match\")\n\t}\n\tout, err := json.Marshal(&continueToken{APIVersion: \"meta.k8s.io/v1\", ResourceVersion: resourceVersion, StartKey: nextKey})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(out), nil\n}\n\n// List implements storage.Interface.List.\nfunc (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := opts.ResourceVersion\n\tmatch := opts.ResourceVersionMatch\n\tpred := opts.Predicate\n\ttrace := utiltrace.New(\"List etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tif s.pathPrefix != \"\" {\n\t\tkey = path.Join(s.pathPrefix, key)\n\t}\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. 
if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\tkeyPrefix := key\n\n\t// set the appropriate clientv3 options to filter the returned data set\n\tvar paging bool\n\toptions := make([]clientv3.OpOption, 0, 4)\n\tif s.pagingEnabled && pred.Limit > 0 {\n\t\tpaging = true\n\t\toptions = append(options, clientv3.WithLimit(pred.Limit))\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tvar fromRV *uint64\n\tif len(resourceVersion) > 0 {\n\t\tparsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\tfromRV = &parsedRV\n\t}\n\n\tvar returnedRV, continueRV, withRev int64\n\tvar continueKey string\n\tswitch {\n\tcase s.pagingEnabled && len(pred.Continue) > 0:\n\t\tcontinueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid continue token: %v\", err))\n\t\t}\n\n\t\tif len(resourceVersion) > 0 && resourceVersion != \"0\" {\n\t\t\treturn apierrors.NewBadRequest(\"specifying resource version is not allowed when using continue\")\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\t\tkey = continueKey\n\n\t\t// If continueRV > 0, the LIST request needs a specific resource version.\n\t\t// continueRV==0 is invalid.\n\t\t// If continueRV < 0, the request is for the latest resource version.\n\t\tif continueRV > 0 {\n\t\t\twithRev = continueRV\n\t\t\treturnedRV = continueRV\n\t\t}\n\tcase s.pagingEnabled && pred.Limit > 0:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a 
response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\t\tif *fromRV > 0 {\n\t\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\t\twithRev = returnedRV\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\tdefault:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\toptions = append(options, clientv3.WithPrefix())\n\t}\n\tif withRev != 0 {\n\t\toptions = append(options, clientv3.WithRev(withRev))\n\t}\n\n\t// loop until we have filled the requested limit from etcd or there are no more results\n\tvar lastKey []byte\n\tvar hasMore bool\n\tvar getResp *clientv3.GetResponse\n\tfor {\n\t\tstartTime := time.Now()\n\t\tgetResp, err = s.client.KV.Get(ctx, key, options...)\n\t\tmetrics.RecordEtcdRequestLatency(\"list\", getTypeName(listPtr), startTime)\n\t\tif err != nil {\n\t\t\treturn interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)\n\t\t}\n\t\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\thasMore = getResp.More\n\n\t\tif len(getResp.Kvs) == 0 && getResp.More {\n\t\t\treturn fmt.Errorf(\"no results were 
found, but etcd indicated there were more values remaining\")\n\t\t}\n\n\t\t// avoid small allocations for the result slice, since this can be called in many\n\t\t// different contexts and we don't know how significantly the result will be filtered\n\t\tif pred.Empty() {\n\t\t\tgrowSlice(v, len(getResp.Kvs))\n\t\t} else {\n\t\t\tgrowSlice(v, 2048, len(getResp.Kvs))\n\t\t}\n\n\t\t// take items from the response until the bucket is full, filtering as we go\n\t\tfor _, kv := range getResp.Kvs {\n\t\t\tif paging && int64(v.Len()) >= pred.Limit {\n\t\t\t\thasMore = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastKey = kv.Key\n\n\t\t\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))\n\t\t\tif err != nil {\n\t\t\t\treturn storage.NewInternalErrorf(\"unable to transform key %q: %v\", kv.Key, err)\n\t\t\t}\n\n\t\t\tif err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// indicate to the client which resource version was returned\n\t\tif returnedRV == 0 {\n\t\t\treturnedRV = getResp.Header.Revision\n\t\t}\n\n\t\t// no more results remain or we didn't request paging\n\t\tif !hasMore || !paging {\n\t\t\tbreak\n\t\t}\n\t\t// we're paging but we have filled our bucket\n\t\tif int64(v.Len()) >= pred.Limit {\n\t\t\tbreak\n\t\t}\n\t\tkey = string(lastKey) + \"\\x00\"\n\t\tif withRev == 0 {\n\t\t\twithRev = returnedRV\n\t\t\toptions = append(options, clientv3.WithRev(withRev))\n\t\t}\n\t}\n\n\t// instruct the client to begin querying from immediately after the last key we returned\n\t// we never return a key that the client wouldn't be allowed to see\n\tif hasMore {\n\t\t// we want to start immediately after the last key\n\t\tnext, err := encodeContinue(string(lastKey)+\"\\x00\", keyPrefix, returnedRV)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar remainingItemCount *int64\n\t\t// getResp.Count counts in objects that do not match the 
pred.\n\t\t// Instead of returning inaccurate count for non-empty selectors, we return nil.\n\t\t// Only set remainingItemCount if the predicate is empty.\n\t\tif utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {\n\t\t\tif pred.Empty() {\n\t\t\t\tc := int64(getResp.Count - pred.Limit)\n\t\t\t\tremainingItemCount = &c\n\t\t\t}\n\t\t}\n\t\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)\n\t}\n\n\t// no continuation\n\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), \"\", nil)\n}\n\n// growSlice takes a slice value and grows its capacity up\n// to the maximum of the passed sizes or maxCapacity, whichever\n// is smaller. Above maxCapacity decisions about allocation are left\n// to the Go runtime on append. This allows a caller to make an\n// educated guess about the potential size of the total list while\n// still avoiding overly aggressive initial allocation. If sizes\n// is empty maxCapacity will be used as the size to grow.\nfunc growSlice(v reflect.Value, maxCapacity int, sizes ...int) {\n\tcap := v.Cap()\n\tmax := cap\n\tfor _, size := range sizes {\n\t\tif size > max {\n\t\t\tmax = size\n\t\t}\n\t}\n\tif len(sizes) == 0 || max > maxCapacity {\n\t\tmax = maxCapacity\n\t}\n\tif max <= cap {\n\t\treturn\n\t}\n\tif v.Len() > 0 {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\treflect.Copy(extra, v)\n\t\tv.Set(extra)\n\t} else {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\tv.Set(extra)\n\t}\n}\n\n// Watch implements storage.Interface.Watch.\nfunc (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, false)\n}\n\n// WatchList implements storage.Interface.WatchList.\nfunc (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, true)\n}\n\nfunc (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, 
recursive bool) (watch.Interface, error) {\n\trev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)\n}\n\nfunc (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {\n\tstate := &objState{\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\tif u, ok := v.Addr().Interface().(runtime.Unstructured); ok {\n\t\tstate.obj = u.NewEmptyInstance()\n\t} else {\n\t\tstate.obj = reflect.New(v.Type()).Interface().(runtime.Object)\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif !ignoreNotFound {\n\t\t\treturn nil, storage.NewKeyNotFoundError(key, 0)\n\t\t}\n\t\tif err := runtime.SetZeroValue(state.obj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdata, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn nil, storage.NewInternalError(err.Error())\n\t\t}\n\t\tstate.rev = getResp.Kvs[0].ModRevision\n\t\tstate.meta.ResourceVersion = uint64(state.rev)\n\t\tstate.data = data\n\t\tstate.stale = stale\n\t\tif err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {\n\tstate := &objState{\n\t\tobj: obj,\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\trv, err := s.versioner.ObjectResourceVersion(obj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't get resource version: %v\", err)\n\t}\n\tstate.rev = int64(rv)\n\tstate.meta.ResourceVersion = uint64(state.rev)\n\n\t// Compute the serialized form - for that we need to temporarily clean\n\t// its resource version field (those are not stored in etcd).\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn 
nil, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tstate.data, err = runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {\n\tret, ttlPtr, err := userUpdate(st.obj, *st.meta)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif err := s.versioner.PrepareObjectForStorage(ret); err != nil {\n\t\treturn nil, 0, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tvar ttl uint64\n\tif ttlPtr != nil {\n\t\tttl = *ttlPtr\n\t}\n\treturn ret, ttl, nil\n}\n\n// ttlOpts returns client options based on given ttl.\n// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length\nfunc (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {\n\tif ttl == 0 {\n\t\treturn nil, nil\n\t}\n\tid, err := s.leaseManager.GetLease(ctx, ttl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []clientv3.OpOption{clientv3.WithLease(id)}, nil\n}\n\n// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is\n// greater than the most recent actualRevision available from storage.\nfunc (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {\n\tif minimumResourceVersion == \"\" {\n\t\treturn nil\n\t}\n\tminimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)\n\tif err != nil {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t}\n\t// Enforce the storage.Interface guarantee that the resource version of the returned data\n\t// \"will be at least 'resourceVersion'\".\n\tif minimumRV > actualRevision {\n\t\treturn 
storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)\n\t}\n\treturn nil\n}\n\n// decode decodes value of bytes into object. It will also set the object resource version to rev.\n// On success, objPtr would be set to the object.\nfunc decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {\n\tif _, err := conversion.EnforcePtr(objPtr); err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\t_, _, err := codec.Decode(value, nil, objPtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn nil\n}\n\n// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.\nfunc appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {\n\tobj, _, err := codec.Decode(data, nil, newItemFunc())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(obj, rev); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\tif matched, err := pred.Matches(obj); err == nil && matched {\n\t\tv.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))\n\t}\n\treturn nil\n}\n\nfunc notFound(key string) clientv3.Cmp {\n\treturn clientv3.Compare(clientv3.ModRevision(key), \"=\", 0)\n}\n\n// getTypeName returns type name of an object for reporting purposes.\nfunc getTypeName(obj interface{}) string {\n\treturn reflect.TypeOf(obj).String()\n}\n"
},
{
"file": "pkg/storage/etcd3/store.go",
"selection": {
"start": {
"line": 167,
"character": 1
},
"end": {
"line": 171,
"character": 12
}
},
"description": "Add finaly the object is written into etcd!",
"contents": "/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage etcd3\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go.etcd.io/etcd/clientv3\"\n\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/meta\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/conversion\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/apiserver/pkg/features\"\n\t\"k8s.io/apiserver/pkg/storage\"\n\t\"k8s.io/apiserver/pkg/storage/etcd3/metrics\"\n\t\"k8s.io/apiserver/pkg/storage/value\"\n\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n\t\"k8s.io/klog/v2\"\n\tutiltrace \"k8s.io/utils/trace\"\n)\n\n// authenticatedDataString satisfies the value.Context interface. It uses the key to\n// authenticate the stored data. This does not defend against reuse of previously\n// encrypted values under the same key, but will prevent an attacker from using an\n// encrypted value from a different key. 
A stronger authenticated data segment would\n// include the etcd3 Version field (which is incremented on each write to a key and\n// reset when the key is deleted), but an attacker with write access to etcd can\n// force deletion and recreation of keys to weaken that angle.\ntype authenticatedDataString string\n\n// AuthenticatedData implements the value.Context interface.\nfunc (d authenticatedDataString) AuthenticatedData() []byte {\n\treturn []byte(string(d))\n}\n\nvar _ value.Context = authenticatedDataString(\"\")\n\ntype store struct {\n\tclient *clientv3.Client\n\tcodec runtime.Codec\n\tversioner storage.Versioner\n\ttransformer value.Transformer\n\tpathPrefix string\n\twatcher *watcher\n\tpagingEnabled bool\n\tleaseManager *leaseManager\n}\n\ntype objState struct {\n\tobj runtime.Object\n\tmeta *storage.ResponseMeta\n\trev int64\n\tdata []byte\n\tstale bool\n}\n\n// New returns an etcd3 implementation of storage.Interface.\nfunc New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface {\n\treturn newStore(c, codec, newFunc, prefix, transformer, pagingEnabled, leaseManagerConfig)\n}\n\nfunc newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store {\n\tversioner := APIObjectVersioner{}\n\tresult := &store{\n\t\tclient: c,\n\t\tcodec: codec,\n\t\tversioner: versioner,\n\t\ttransformer: transformer,\n\t\tpagingEnabled: pagingEnabled,\n\t\t// for compatibility with etcd2 impl.\n\t\t// no-op for default prefix of '/registry'.\n\t\t// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'\n\t\tpathPrefix: path.Join(\"/\", prefix),\n\t\twatcher: newWatcher(c, codec, newFunc, versioner, transformer),\n\t\tleaseManager: newDefaultLeaseManager(c, 
leaseManagerConfig),\n\t}\n\treturn result\n}\n\n// Versioner implements storage.Interface.Versioner.\nfunc (s *store) Versioner() storage.Versioner {\n\treturn s.versioner\n}\n\n// Get implements storage.Interface.Get.\nfunc (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(ctx, key)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif opts.IgnoreNotFound {\n\t\t\treturn runtime.SetZeroValue(out)\n\t\t}\n\t\treturn storage.NewKeyNotFoundError(key, 0)\n\t}\n\tkv := getResp.Kvs[0]\n\n\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\treturn decode(s.codec, s.versioner, data, out, kv.ModRevision)\n}\n\n// Create implements storage.Interface.Create.\nfunc (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {\n\tif version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {\n\t\treturn errors.New(\"resourceVersion should not be set on objects to be created\")\n\t}\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tdata, err := runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\topts, err := s.ttlOpts(ctx, int64(ttl))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))\n\tif err != nil {\n\t\treturn storage.NewInternalError(err.Error())\n\t}\n\n\tstartTime := 
time.Now()\n\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\tnotFound(key),\n\t).Then(\n\t\tclientv3.OpPut(key, string(newData), opts...),\n\t).Commit()\n\tmetrics.RecordEtcdRequestLatency(\"create\", getTypeName(obj), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !txnResp.Succeeded {\n\t\treturn storage.NewKeyExistsError(key, 0)\n\t}\n\n\tif out != nil {\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\t\treturn decode(s.codec, s.versioner, data, out, putResp.Header.Revision)\n\t}\n\treturn nil\n}\n\n// Delete implements storage.Interface.Delete.\nfunc (s *store) Delete(\n\tctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)\n}\n\nfunc (s *store) conditionalDelete(\n\tctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,\n\tvalidateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, false)\n\t}\n\n\tvar origState *objState\n\tvar err error\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tif preconditions != nil {\n\t\t\tif err := 
preconditions.Check(key, origState.obj); err != nil {\n\t\t\t\tif origStateIsCurrent {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// It's possible we're working with stale data.\n\t\t\t\t// Actually fetch\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\t// Retry\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif err := validateDeletion(ctx, origState.obj); err != nil {\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we're working with stale data.\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpDelete(key),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"delete\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"deletion of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t}\n}\n\n// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\nfunc (s *store) GuaranteedUpdate(\n\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", 
getTypeName(out)})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\n\tv, err := conversion.EnforcePtr(out)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\n\tgetCurrentState := func() (*objState, error) {\n\t\tstartTime := time.Now()\n\t\tgetResp, err := s.client.KV.Get(ctx, key)\n\t\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n\t}\n\n\tvar origState *objState\n\tvar origStateIsCurrent bool\n\tif cachedExistingObject != nil {\n\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n\t} else {\n\t\torigState, err = getCurrentState()\n\t\torigStateIsCurrent = true\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\ttrace.Step(\"initial value restored\")\n\n\ttransformContext := authenticatedDataString(key)\n\tfor {\n\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n\t\tif err != nil {\n\t\t\t// If our data is already up to date, return the error\n\t\t\tif origStateIsCurrent {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// It's possible we were working with stale data\n\t\t\t// Actually fetch\n\t\t\torigState, err = getCurrentState()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\torigStateIsCurrent = true\n\t\t\t// Retry\n\t\t\tcontinue\n\t\t}\n\n\t\tdata, err := runtime.Encode(s.codec, ret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !origState.stale && bytes.Equal(data, 
origState.data) {\n\t\t\t// if we skipped the original Get in this loop, we must refresh from\n\t\t\t// etcd in order to be sure the data in the store is equivalent to\n\t\t\t// our desired serialization\n\t\t\tif !origStateIsCurrent {\n\t\t\t\torigState, err = getCurrentState()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\torigStateIsCurrent = true\n\t\t\t\tif !bytes.Equal(data, origState.data) {\n\t\t\t\t\t// original data changed, restart loop\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// recheck that the data from etcd is not stale before short-circuiting a write\n\t\t\tif !origState.stale {\n\t\t\t\treturn decode(s.codec, s.versioner, origState.data, out, origState.rev)\n\t\t\t}\n\t\t}\n\n\t\tnewData, err := s.transformer.TransformToStorage(data, transformContext)\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\n\t\topts, err := s.ttlOpts(ctx, int64(ttl))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction prepared\")\n\n\t\tstartTime := time.Now()\n\t\ttxnResp, err := s.client.KV.Txn(ctx).If(\n\t\t\tclientv3.Compare(clientv3.ModRevision(key), \"=\", origState.rev),\n\t\t).Then(\n\t\t\tclientv3.OpPut(key, string(newData), opts...),\n\t\t).Else(\n\t\t\tclientv3.OpGet(key),\n\t\t).Commit()\n\t\tmetrics.RecordEtcdRequestLatency(\"update\", getTypeName(out), startTime)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrace.Step(\"Transaction committed\")\n\t\tif !txnResp.Succeeded {\n\t\t\tgetResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())\n\t\t\tklog.V(4).Infof(\"GuaranteedUpdate of %s failed because of a conflict, going to retry\", key)\n\t\t\torigState, err = s.getState(getResp, key, v, ignoreNotFound)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttrace.Step(\"Retry value restored\")\n\t\t\torigStateIsCurrent = true\n\t\t\tcontinue\n\t\t}\n\t\tputResp := txnResp.Responses[0].GetResponsePut()\n\n\t\treturn decode(s.codec, s.versioner, data, 
out, putResp.Header.Revision)\n\t}\n}\n\n// GetToList implements storage.Interface.GetToList.\nfunc (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := listOpts.ResourceVersion\n\tmatch := listOpts.ResourceVersionMatch\n\tpred := listOpts.Predicate\n\ttrace := utiltrace.New(\"GetToList etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tkey = path.Join(s.pathPrefix, key)\n\tstartTime := time.Now()\n\tvar opts []clientv3.OpOption\n\tif len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {\n\t\trv, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\topts = append(opts, clientv3.WithRev(int64(rv)))\n\t}\n\n\tgetResp, err := s.client.KV.Get(ctx, key, opts...)\n\tmetrics.RecordEtcdRequestLatency(\"get\", getTypeName(listPtr), startTime)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(getResp.Kvs) > 0 {\n\t\tdata, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn storage.NewInternalError(err.Error())\n\t\t}\n\t\tif err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, 
s.versioner, newItemFunc); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// update version with cluster level revision\n\treturn s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), \"\", nil)\n}\n\nfunc getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {\n\t// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items\n\tif unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {\n\t\tif apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {\n\t\t\treturn func() runtime.Object {\n\t\t\t\treturn &unstructured.Unstructured{Object: map[string]interface{}{\"apiVersion\": apiVersion}}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Otherwise just instantiate an empty item\n\telem := v.Type().Elem()\n\treturn func() runtime.Object {\n\t\treturn reflect.New(elem).Interface().(runtime.Object)\n\t}\n}\n\nfunc (s *store) Count(key string) (int64, error) {\n\tkey = path.Join(s.pathPrefix, key)\n\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\n\tstartTime := time.Now()\n\tgetResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())\n\tmetrics.RecordEtcdRequestLatency(\"listWithCount\", key, startTime)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getResp.Count, nil\n}\n\n// continueToken is a simple structured object for encoding the state of a continue token.\n// TODO: if we change the version of the encoded from, we can't start encoding the new version\n// until all other servers are upgraded (i.e. 
we need to support rolling schema)\n// This is a public API struct and cannot change.\ntype continueToken struct {\n\tAPIVersion string `json:\"v\"`\n\tResourceVersion int64 `json:\"rv\"`\n\tStartKey string `json:\"start\"`\n}\n\n// parseFrom transforms an encoded predicate from into a versioned struct.\n// TODO: return a typed error that instructs clients that they must relist\nfunc decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {\n\tdata, err := base64.RawURLEncoding.DecodeString(continueValue)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tvar c continueToken\n\tif err := json.Unmarshal(data, &c); err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %v\", err)\n\t}\n\tswitch c.APIVersion {\n\tcase \"meta.k8s.io/v1\":\n\t\tif c.ResourceVersion == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)\")\n\t\t}\n\t\tif len(c.StartKey) == 0 {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: encoded start key empty (version meta.k8s.io/v1)\")\n\t\t}\n\t\t// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot\n\t\t// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with\n\t\t// continue start key that is fully qualified and cannot range over anything less specific than\n\t\t// keyPrefix.\n\t\tkey := c.StartKey\n\t\tif !strings.HasPrefix(key, \"/\") {\n\t\t\tkey = \"/\" + key\n\t\t}\n\t\tcleaned := path.Clean(key)\n\t\tif cleaned != key {\n\t\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: %s\", c.StartKey)\n\t\t}\n\t\treturn keyPrefix + cleaned[1:], c.ResourceVersion, nil\n\tdefault:\n\t\treturn \"\", 0, fmt.Errorf(\"continue key is not valid: server does not recognize this encoded version %q\", c.APIVersion)\n\t}\n}\n\n// encodeContinue returns a string representing the 
encoded continuation of the current query.\nfunc encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {\n\tnextKey := strings.TrimPrefix(key, keyPrefix)\n\tif nextKey == key {\n\t\treturn \"\", fmt.Errorf(\"unable to encode next field: the key and key prefix do not match\")\n\t}\n\tout, err := json.Marshal(&continueToken{APIVersion: \"meta.k8s.io/v1\", ResourceVersion: resourceVersion, StartKey: nextKey})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(out), nil\n}\n\n// List implements storage.Interface.List.\nfunc (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {\n\tresourceVersion := opts.ResourceVersion\n\tmatch := opts.ResourceVersionMatch\n\tpred := opts.Predicate\n\ttrace := utiltrace.New(\"List etcd3\",\n\t\tutiltrace.Field{\"key\", key},\n\t\tutiltrace.Field{\"resourceVersion\", resourceVersion},\n\t\tutiltrace.Field{\"resourceVersionMatch\", match},\n\t\tutiltrace.Field{\"limit\", pred.Limit},\n\t\tutiltrace.Field{\"continue\", pred.Continue})\n\tdefer trace.LogIfLong(500 * time.Millisecond)\n\tlistPtr, err := meta.GetItemsPtr(listObj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv, err := conversion.EnforcePtr(listPtr)\n\tif err != nil || v.Kind() != reflect.Slice {\n\t\treturn fmt.Errorf(\"need ptr to slice: %v\", err)\n\t}\n\n\tif s.pathPrefix != \"\" {\n\t\tkey = path.Join(s.pathPrefix, key)\n\t}\n\t// We need to make sure the key ended with \"/\" so that we only get children \"directories\".\n\t// e.g. 
if we have key \"/a\", \"/a/b\", \"/ab\", getting keys with prefix \"/a\" will return all three,\n\t// while with prefix \"/a/\" will return only \"/a/b\" which is the correct answer.\n\tif !strings.HasSuffix(key, \"/\") {\n\t\tkey += \"/\"\n\t}\n\tkeyPrefix := key\n\n\t// set the appropriate clientv3 options to filter the returned data set\n\tvar paging bool\n\toptions := make([]clientv3.OpOption, 0, 4)\n\tif s.pagingEnabled && pred.Limit > 0 {\n\t\tpaging = true\n\t\toptions = append(options, clientv3.WithLimit(pred.Limit))\n\t}\n\n\tnewItemFunc := getNewItemFunc(listObj, v)\n\n\tvar fromRV *uint64\n\tif len(resourceVersion) > 0 {\n\t\tparsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t\t}\n\t\tfromRV = &parsedRV\n\t}\n\n\tvar returnedRV, continueRV, withRev int64\n\tvar continueKey string\n\tswitch {\n\tcase s.pagingEnabled && len(pred.Continue) > 0:\n\t\tcontinueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)\n\t\tif err != nil {\n\t\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid continue token: %v\", err))\n\t\t}\n\n\t\tif len(resourceVersion) > 0 && resourceVersion != \"0\" {\n\t\t\treturn apierrors.NewBadRequest(\"specifying resource version is not allowed when using continue\")\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\t\tkey = continueKey\n\n\t\t// If continueRV > 0, the LIST request needs a specific resource version.\n\t\t// continueRV==0 is invalid.\n\t\t// If continueRV < 0, the request is for the latest resource version.\n\t\tif continueRV > 0 {\n\t\t\twithRev = continueRV\n\t\t\treturnedRV = continueRV\n\t\t}\n\tcase s.pagingEnabled && pred.Limit > 0:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a 
response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\t\tif *fromRV > 0 {\n\t\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\t\twithRev = returnedRV\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\trangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)\n\t\toptions = append(options, clientv3.WithRange(rangeEnd))\n\tdefault:\n\t\tif fromRV != nil {\n\t\t\tswitch match {\n\t\t\tcase metav1.ResourceVersionMatchNotOlderThan:\n\t\t\t\t// The not older than constraint is checked after we get a response from etcd,\n\t\t\t\t// and returnedRV is then set to the revision we get from the etcd response.\n\t\t\tcase metav1.ResourceVersionMatchExact:\n\t\t\t\treturnedRV = int64(*fromRV)\n\t\t\t\twithRev = returnedRV\n\t\t\tcase \"\": // legacy case\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown ResourceVersionMatch value: %v\", match)\n\t\t\t}\n\t\t}\n\n\t\toptions = append(options, clientv3.WithPrefix())\n\t}\n\tif withRev != 0 {\n\t\toptions = append(options, clientv3.WithRev(withRev))\n\t}\n\n\t// loop until we have filled the requested limit from etcd or there are no more results\n\tvar lastKey []byte\n\tvar hasMore bool\n\tvar getResp *clientv3.GetResponse\n\tfor {\n\t\tstartTime := time.Now()\n\t\tgetResp, err = s.client.KV.Get(ctx, key, options...)\n\t\tmetrics.RecordEtcdRequestLatency(\"list\", getTypeName(listPtr), startTime)\n\t\tif err != nil {\n\t\t\treturn interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)\n\t\t}\n\t\tif err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\thasMore = getResp.More\n\n\t\tif len(getResp.Kvs) == 0 && getResp.More {\n\t\t\treturn fmt.Errorf(\"no results were 
found, but etcd indicated there were more values remaining\")\n\t\t}\n\n\t\t// avoid small allocations for the result slice, since this can be called in many\n\t\t// different contexts and we don't know how significantly the result will be filtered\n\t\tif pred.Empty() {\n\t\t\tgrowSlice(v, len(getResp.Kvs))\n\t\t} else {\n\t\t\tgrowSlice(v, 2048, len(getResp.Kvs))\n\t\t}\n\n\t\t// take items from the response until the bucket is full, filtering as we go\n\t\tfor _, kv := range getResp.Kvs {\n\t\t\tif paging && int64(v.Len()) >= pred.Limit {\n\t\t\t\thasMore = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastKey = kv.Key\n\n\t\t\tdata, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))\n\t\t\tif err != nil {\n\t\t\t\treturn storage.NewInternalErrorf(\"unable to transform key %q: %v\", kv.Key, err)\n\t\t\t}\n\n\t\t\tif err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// indicate to the client which resource version was returned\n\t\tif returnedRV == 0 {\n\t\t\treturnedRV = getResp.Header.Revision\n\t\t}\n\n\t\t// no more results remain or we didn't request paging\n\t\tif !hasMore || !paging {\n\t\t\tbreak\n\t\t}\n\t\t// we're paging but we have filled our bucket\n\t\tif int64(v.Len()) >= pred.Limit {\n\t\t\tbreak\n\t\t}\n\t\tkey = string(lastKey) + \"\\x00\"\n\t\tif withRev == 0 {\n\t\t\twithRev = returnedRV\n\t\t\toptions = append(options, clientv3.WithRev(withRev))\n\t\t}\n\t}\n\n\t// instruct the client to begin querying from immediately after the last key we returned\n\t// we never return a key that the client wouldn't be allowed to see\n\tif hasMore {\n\t\t// we want to start immediately after the last key\n\t\tnext, err := encodeContinue(string(lastKey)+\"\\x00\", keyPrefix, returnedRV)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar remainingItemCount *int64\n\t\t// getResp.Count counts in objects that do not match the 
pred.\n\t\t// Instead of returning inaccurate count for non-empty selectors, we return nil.\n\t\t// Only set remainingItemCount if the predicate is empty.\n\t\tif utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {\n\t\t\tif pred.Empty() {\n\t\t\t\tc := int64(getResp.Count - pred.Limit)\n\t\t\t\tremainingItemCount = &c\n\t\t\t}\n\t\t}\n\t\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)\n\t}\n\n\t// no continuation\n\treturn s.versioner.UpdateList(listObj, uint64(returnedRV), \"\", nil)\n}\n\n// growSlice takes a slice value and grows its capacity up\n// to the maximum of the passed sizes or maxCapacity, whichever\n// is smaller. Above maxCapacity decisions about allocation are left\n// to the Go runtime on append. This allows a caller to make an\n// educated guess about the potential size of the total list while\n// still avoiding overly aggressive initial allocation. If sizes\n// is empty maxCapacity will be used as the size to grow.\nfunc growSlice(v reflect.Value, maxCapacity int, sizes ...int) {\n\tcap := v.Cap()\n\tmax := cap\n\tfor _, size := range sizes {\n\t\tif size > max {\n\t\t\tmax = size\n\t\t}\n\t}\n\tif len(sizes) == 0 || max > maxCapacity {\n\t\tmax = maxCapacity\n\t}\n\tif max <= cap {\n\t\treturn\n\t}\n\tif v.Len() > 0 {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\treflect.Copy(extra, v)\n\t\tv.Set(extra)\n\t} else {\n\t\textra := reflect.MakeSlice(v.Type(), 0, max)\n\t\tv.Set(extra)\n\t}\n}\n\n// Watch implements storage.Interface.Watch.\nfunc (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, false)\n}\n\n// WatchList implements storage.Interface.WatchList.\nfunc (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {\n\treturn s.watch(ctx, key, opts, true)\n}\n\nfunc (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, 
recursive bool) (watch.Interface, error) {\n\trev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey = path.Join(s.pathPrefix, key)\n\treturn s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)\n}\n\nfunc (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {\n\tstate := &objState{\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\tif u, ok := v.Addr().Interface().(runtime.Unstructured); ok {\n\t\tstate.obj = u.NewEmptyInstance()\n\t} else {\n\t\tstate.obj = reflect.New(v.Type()).Interface().(runtime.Object)\n\t}\n\n\tif len(getResp.Kvs) == 0 {\n\t\tif !ignoreNotFound {\n\t\t\treturn nil, storage.NewKeyNotFoundError(key, 0)\n\t\t}\n\t\tif err := runtime.SetZeroValue(state.obj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdata, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))\n\t\tif err != nil {\n\t\t\treturn nil, storage.NewInternalError(err.Error())\n\t\t}\n\t\tstate.rev = getResp.Kvs[0].ModRevision\n\t\tstate.meta.ResourceVersion = uint64(state.rev)\n\t\tstate.data = data\n\t\tstate.stale = stale\n\t\tif err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {\n\tstate := &objState{\n\t\tobj: obj,\n\t\tmeta: &storage.ResponseMeta{},\n\t}\n\n\trv, err := s.versioner.ObjectResourceVersion(obj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't get resource version: %v\", err)\n\t}\n\tstate.rev = int64(rv)\n\tstate.meta.ResourceVersion = uint64(state.rev)\n\n\t// Compute the serialized form - for that we need to temporarily clean\n\t// its resource version field (those are not stored in etcd).\n\tif err := s.versioner.PrepareObjectForStorage(obj); err != nil {\n\t\treturn 
nil, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tstate.data, err = runtime.Encode(s.codec, obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn state, nil\n}\n\nfunc (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {\n\tret, ttlPtr, err := userUpdate(st.obj, *st.meta)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif err := s.versioner.PrepareObjectForStorage(ret); err != nil {\n\t\treturn nil, 0, fmt.Errorf(\"PrepareObjectForStorage failed: %v\", err)\n\t}\n\tvar ttl uint64\n\tif ttlPtr != nil {\n\t\tttl = *ttlPtr\n\t}\n\treturn ret, ttl, nil\n}\n\n// ttlOpts returns client options based on given ttl.\n// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length\nfunc (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {\n\tif ttl == 0 {\n\t\treturn nil, nil\n\t}\n\tid, err := s.leaseManager.GetLease(ctx, ttl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []clientv3.OpOption{clientv3.WithLease(id)}, nil\n}\n\n// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is\n// greater than the most recent actualRevision available from storage.\nfunc (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {\n\tif minimumResourceVersion == \"\" {\n\t\treturn nil\n\t}\n\tminimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)\n\tif err != nil {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"invalid resource version: %v\", err))\n\t}\n\t// Enforce the storage.Interface guarantee that the resource version of the returned data\n\t// \"will be at least 'resourceVersion'\".\n\tif minimumRV > actualRevision {\n\t\treturn 
storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)\n\t}\n\treturn nil\n}\n\n// decode decodes value of bytes into object. It will also set the object resource version to rev.\n// On success, objPtr would be set to the object.\nfunc decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {\n\tif _, err := conversion.EnforcePtr(objPtr); err != nil {\n\t\treturn fmt.Errorf(\"unable to convert output object to pointer: %v\", err)\n\t}\n\t_, _, err := codec.Decode(value, nil, objPtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\treturn nil\n}\n\n// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.\nfunc appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {\n\tobj, _, err := codec.Decode(data, nil, newItemFunc())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// being unable to set the version does not prevent the object from being extracted\n\tif err := versioner.UpdateObject(obj, rev); err != nil {\n\t\tklog.Errorf(\"failed to update object version: %v\", err)\n\t}\n\tif matched, err := pred.Matches(obj); err == nil && matched {\n\t\tv.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))\n\t}\n\treturn nil\n}\n\nfunc notFound(key string) clientv3.Cmp {\n\treturn clientv3.Compare(clientv3.ModRevision(key), \"=\", 0)\n}\n\n// getTypeName returns type name of an object for reporting purposes.\nfunc getTypeName(obj interface{}) string {\n\treturn reflect.TypeOf(obj).String()\n}\n"
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment