Unverified commit c3786272, authored by runzexia

recover kube-open version

Signed-off-by: runzexia <runzexia@yunify.com>
Parent 20df96f9
......@@ -5229,6 +5229,7 @@
},
"io.k8s.api.core.v1.LocalObjectReference": {
"description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
"type": "object",
"properties": {
"name": {
"description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
......@@ -5238,6 +5239,7 @@
},
"io.k8s.api.networking.v1.IPBlock": {
"description": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
"type": "object",
"required": [
"cidr"
],
......@@ -5257,6 +5259,7 @@
},
"io.k8s.api.networking.v1.NetworkPolicyPort": {
"description": "NetworkPolicyPort describes a port to allow traffic on",
"type": "object",
"properties": {
"port": {
"description": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.",
......@@ -5270,6 +5273,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": {
"description": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
"type": "object",
"required": [
"name",
"versions"
......@@ -5316,6 +5320,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList": {
"description": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
"type": "object",
"required": [
"groups"
],
......@@ -5346,6 +5351,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": {
"description": "APIResource specifies the name of a resource and whether it is namespaced.",
"type": "object",
"required": [
"name",
"singularName",
......@@ -5403,6 +5409,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": {
"description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
"type": "object",
"required": [
"groupVersion",
"resources"
......@@ -5438,6 +5445,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": {
"description": "DeleteOptions may be provided when deleting an API object.",
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
......@@ -5502,6 +5510,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": {
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"type": "object",
"required": [
"groupVersion",
"version"
......@@ -5519,6 +5528,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.Initializer": {
"description": "Initializer is information about an initializer that has not yet completed.",
"type": "object",
"required": [
"name"
],
......@@ -5531,6 +5541,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.Initializers": {
"description": "Initializers tracks the progress of initialization.",
"type": "object",
"required": [
"pending"
],
......@@ -5552,6 +5563,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector": {
"description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
"type": "object",
"properties": {
"matchExpressions": {
"description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
......@@ -5571,6 +5583,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement": {
"description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
"type": "object",
"required": [
"key",
"operator"
......@@ -5597,6 +5610,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": {
"description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
"type": "object",
"properties": {
"continue": {
"description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
......@@ -5614,6 +5628,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": {
"description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
"type": "object",
"properties": {
"annotations": {
"description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
......@@ -5700,6 +5715,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": {
"description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
"type": "object",
"required": [
"apiVersion",
"kind",
......@@ -5734,10 +5750,12 @@
}
},
"io.k8s.apimachinery.pkg.apis.meta.v1.Patch": {
"description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body."
"description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
"type": "object"
},
"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": {
"description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
"type": "object",
"properties": {
"uid": {
"description": "Specifies the target UID.",
......@@ -5747,6 +5765,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": {
"description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
"type": "object",
"required": [
"clientCIDR",
"serverAddress"
......@@ -5764,6 +5783,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.Status": {
"description": "Status is a return value for calls that don't return other objects.",
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
......@@ -5809,6 +5829,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": {
"description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
"type": "object",
"properties": {
"field": {
"description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
......@@ -5826,6 +5847,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": {
"description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
"type": "object",
"properties": {
"causes": {
"description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
......@@ -5864,6 +5886,7 @@
},
"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": {
"description": "Event represents a single event to a watched resource.",
"type": "object",
"required": [
"type",
"object"
......@@ -5907,6 +5930,7 @@
},
"io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object",
"required": [
"Raw"
],
......@@ -5925,6 +5949,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicy": {
"description": "WorkspaceNetworkPolicy is a set of network policies applied to the scope to workspace",
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
......@@ -5954,6 +5979,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicyEgressRule": {
"description": "WorkspaceNetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a WorkspaceNetworkPolicySpec's podSelector. The traffic must match both ports and to.",
"type": "object",
"properties": {
"from": {
"description": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
......@@ -5973,6 +5999,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicyIngressRule": {
"description": "WorkspaceNetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a WorkspaceNetworkPolicySpec's podSelector. The traffic must match both ports and from.",
"type": "object",
"properties": {
"from": {
"description": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.",
......@@ -5992,6 +6019,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicyList": {
"description": "WorkspaceNetworkPolicyList contains a list of WorkspaceNetworkPolicy",
"type": "object",
"required": [
"items"
],
......@@ -6024,6 +6052,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicyPeer": {
"description": "WorkspaceNetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed. It is same as 'NetworkPolicyPeer' in k8s but with an additional field 'WorkspaceSelector'",
"type": "object",
"properties": {
"ipBlock": {
"description": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.",
......@@ -6044,6 +6073,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicySpec": {
"description": "WorkspaceNetworkPolicySpec defines the desired state of WorkspaceNetworkPolicy",
"type": "object",
"properties": {
"egress": {
"description": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
......@@ -6073,10 +6103,12 @@
}
},
"io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.WorkspaceNetworkPolicyStatus": {
"description": "WorkspaceNetworkPolicyStatus defines the observed state of WorkspaceNetworkPolicy"
"description": "WorkspaceNetworkPolicyStatus defines the observed state of WorkspaceNetworkPolicy",
"type": "object"
},
"io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace": {
"description": "Workspace is the Schema for the workspaces API",
"type": "object",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
......@@ -6106,6 +6138,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceList": {
"description": "WorkspaceList contains a list of Workspace",
"type": "object",
"required": [
"items"
],
......@@ -6138,6 +6171,7 @@
},
"io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceSpec": {
"description": "WorkspaceSpec defines the desired state of Workspace",
"type": "object",
"properties": {
"manager": {
"type": "string"
......@@ -6145,7 +6179,8 @@
}
},
"io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceStatus": {
"description": "WorkspaceStatus defines the observed state of Workspace"
"description": "WorkspaceStatus defines the observed state of Workspace",
"type": "object"
}
}
}
\ No newline at end of file
......@@ -6,6 +6,7 @@ require (
bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c // indirect
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.4.12 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/goquery v1.5.0
github.com/Sirupsen/logrus v1.4.1 // indirect
github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
......@@ -76,6 +77,8 @@ require (
github.com/mholt/certmagic v0.5.1 // indirect
github.com/miekg/dns v1.1.9 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
......@@ -119,8 +122,9 @@ require (
k8s.io/apiserver v0.0.0-20190507070644-e9c02aff496d
k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
k8s.io/code-generator v0.0.0-20190405172246-9a4d48088f6a
k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a // indirect
k8s.io/klog v0.3.1
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058
k8s.io/kube-openapi v0.0.0-20181109181836-c59034cc13d5
k8s.io/kubernetes v1.13.6
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect
sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e
......
This diff has been collapsed.
language: go
go:
- 1.7
- 1.8
- 1.x
- tip
env:
- GO111MODULE=on
install:
- go mod download
script:
- go test -race -v
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright (c) 2015 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this library except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
......@@ -6,6 +6,10 @@ response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.
## Install
```bash
go get -u github.com/NYTimes/gziphandler
```
## Usage
......@@ -48,5 +52,5 @@ The docs can be found at [godoc.org][docs], as usual.
[docs]: https://godoc.org/github.com/nytimes/gziphandler
[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md
[docs]: https://godoc.org/github.com/NYTimes/gziphandler
[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
module github.com/NYTimes/gziphandler
go 1.11
require github.com/stretchr/testify v1.3.0
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
package gziphandler
package gziphandler // import "github.com/NYTimes/gziphandler"
import (
"bufio"
"compress/gzip"
"fmt"
"io"
"mime"
"net"
"net/http"
"strconv"
......@@ -28,9 +29,11 @@ const (
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// DefaultMinSize defines the minimum size to reach to enable compression.
// It's 512 bytes.
DefaultMinSize = 512
// DefaultMinSize is the default minimum size until we enable gzip compression.
// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
// If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
// That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
DefaultMinSize = 1400
)
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
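To make the new 1400-byte threshold concrete, here is a minimal usage sketch (the handler, payload, and port are hypothetical, not from this repository): responses shorter than DefaultMinSize are passed through uncompressed, while larger gzip-capable responses are compressed.

```go
package main

import (
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Responses shorter than DefaultMinSize (1400 bytes) are flushed to the
	// client uncompressed; longer ones are gzipped when the request carries
	// Accept-Encoding: gzip.
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello, world") // well under 1400 bytes: passed through as-is
	})
	http.ListenAndServe(":8080", gziphandler.GzipHandler(hello))
}
```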
......@@ -80,40 +83,71 @@ type GzipResponseWriter struct {
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter.
contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
}
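// GzipResponseWriterWithCloseNotify embeds GzipResponseWriter and forwards
// CloseNotify to the underlying ResponseWriter, so handlers that rely on
// http.CloseNotifier keep working when the writer is wrapped.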
type GzipResponseWriterWithCloseNotify struct {
*GzipResponseWriter
}
func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If content type is not set.
if _, ok := w.Header()[contentType]; !ok {
// It infer it from the uncompressed body.
w.Header().Set(contentType, http.DetectContentType(b))
}
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
return w.gw.Write(b)
}
// If we have already decided not to use GZIP, immediately passthrough.
if w.ignore {
return w.ResponseWriter.Write(b)
}
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
// On the first write, w.buf changes from nil to a valid slice
w.buf = append(w.buf, b...)
// If the global writes are bigger than the minSize, compression is enable.
if len(w.buf) >= w.minSize {
err := w.startGzip()
if err != nil {
return 0, err
var (
cl, _ = strconv.Atoi(w.Header().Get(contentLength))
ct = w.Header().Get(contentType)
ce = w.Header().Get(contentEncoding)
)
// Only continue if they didn't already choose an encoding or a known unhandled content length or type.
if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
if len(w.buf) < w.minSize && cl == 0 {
return len(b), nil
}
// If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
if cl >= w.minSize || len(w.buf) >= w.minSize {
// If a Content-Type wasn't specified, infer it from the current buffer.
if ct == "" {
ct = http.DetectContentType(w.buf)
w.Header().Set(contentType, ct)
}
// If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
if handleContentType(w.contentTypes, ct) {
if err := w.startGzip(); err != nil {
return 0, err
}
return len(b), nil
}
}
}
// If we got here, we should not GZIP this response.
if err := w.startPlain(); err != nil {
return 0, err
}
return len(b), nil
}
// startGzip initialize any GZIP specific informations.
// startGzip initializes a GZIP writer and writes the buffer.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
......@@ -125,28 +159,57 @@ func (w *GzipResponseWriter) startGzip() error {
// Write the header to gzip response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
// Ensure that no other WriteHeader's happen
w.code = 0
}
// Initialize the GZIP response.
w.init()
// Flush the buffer into the gzip reponse.
n, err := w.gw.Write(w.buf)
// Initialize and flush the buffer into the gzip response if there are any bytes.
// If there aren't any, we shouldn't initialize it yet because on Close it will
// write the gzip header even if nothing was ever written.
if len(w.buf) > 0 {
// Initialize the GZIP response.
w.init()
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
err = io.ErrShortWrite
}
return err
}
return nil
}
// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
func (w *GzipResponseWriter) startPlain() error {
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
// Ensure that no other WriteHeader's happen
w.code = 0
}
w.ignore = true
// If Write was never called then don't call Write on the underlying ResponseWriter.
if w.buf == nil {
return nil
}
n, err := w.ResponseWriter.Write(w.buf)
w.buf = nil
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
err = io.ErrShortWrite
}
w.buf = nil
return err
}
// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
w.code = code
if w.code == 0 {
w.code = code
}
}
// init grabs a new gzip writer from the gzipWriterPool and writes the correct
......@@ -161,19 +224,18 @@ func (w *GzipResponseWriter) init() {
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
if w.ignore {
return nil
}
if w.gw == nil {
// Gzip not trigged yet, write out regular response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
if w.buf != nil {
_, writeErr := w.ResponseWriter.Write(w.buf)
// Returns the error if any at write.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
// GZIP not triggered yet, write out regular response.
err := w.startPlain()
// Returns the error if any at write.
if err != nil {
err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
}
return nil
return err
}
err := w.gw.Close()
......@@ -186,6 +248,14 @@ func (w *GzipResponseWriter) Close() error {
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
if w.gw == nil && !w.ignore {
// Only flush once startGzip or startPlain has been called.
//
// Flush is thus a no-op until we're certain whether a plain
// or gzipped response will be served.
return
}
if w.gw != nil {
w.gw.Flush()
}
......@@ -230,27 +300,44 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
// NewGzipLevelAndMinSize behaves as NewGzipLevelHandler except it lets the caller
// specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
return nil, fmt.Errorf("invalid compression level requested: %d", level)
return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
}
func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
c := &config{
level: gzip.DefaultCompression,
minSize: DefaultMinSize,
}
if minSize < 0 {
return nil, fmt.Errorf("minimum size must be more than zero")
for _, o := range opts {
o(c)
}
if err := c.validate(); err != nil {
return nil, err
}
return func(h http.Handler) http.Handler {
index := poolIndex(level)
index := poolIndex(c.level)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add(vary, acceptEncoding)
if acceptsGzip(r) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
minSize: minSize,
minSize: c.minSize,
contentTypes: c.contentTypes,
}
defer gw.Close()
h.ServeHTTP(gw, r)
if _, ok := w.(http.CloseNotifier); ok {
gwcn := GzipResponseWriterWithCloseNotify{gw}
h.ServeHTTP(gwcn, r)
} else {
h.ServeHTTP(gw, r)
}
} else {
h.ServeHTTP(w, r)
}
......@@ -258,6 +345,98 @@ func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler
}, nil
}
// Parsed representation of one of the inputs to ContentTypes.
// See https://golang.org/pkg/mime/#ParseMediaType
type parsedContentType struct {
mediaType string
params map[string]string
}
// equals returns whether this content type matches another content type.
func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
if pct.mediaType != mediaType {
return false
}
// if pct has no params, don't care about other's params
if len(pct.params) == 0 {
return true
}
// if pct has any params, they must be identical to other's.
if len(pct.params) != len(params) {
return false
}
for k, v := range pct.params {
if w, ok := params[k]; !ok || v != w {
return false
}
}
return true
}
// Used for functional configuration.
type config struct {
minSize int
level int
contentTypes []parsedContentType
}
func (c *config) validate() error {
if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
return fmt.Errorf("invalid compression level requested: %d", c.level)
}
if c.minSize < 0 {
return fmt.Errorf("minimum size must be more than zero")
}
return nil
}
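// option is a functional option applied to a config by GzipHandlerWithOpts.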
type option func(c *config)
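// MinSize returns an option that sets the minimum response size, in bytes,
// below which responses are written out uncompressed.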
func MinSize(size int) option {
return func(c *config) {
c.minSize = size
}
}
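// CompressionLevel returns an option that sets the gzip compression level
// used by the pooled writers.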
func CompressionLevel(level int) option {
return func(c *config) {
c.level = level
}
}
// ContentTypes specifies a list of content types to compare
// the Content-Type header to before compressing. If none
// match, the response will be returned as-is.
//
// Content types are compared in a case-insensitive, whitespace-ignored
// manner.
//
// A MIME type without any other directive will match a content type
// that has the same MIME type, regardless of that content type's other
// directives. I.e., "text/html" will match both "text/html" and
// "text/html; charset=utf-8".
//
// A MIME type with any other directive will only match a content type
// that has the same MIME type and other directives. I.e.,
// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
//
// By default, responses are gzipped regardless of
// Content-Type.
func ContentTypes(types []string) option {
return func(c *config) {
c.contentTypes = []parsedContentType{}
for _, v := range types {
mediaType, params, err := mime.ParseMediaType(v)
if err == nil {
c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
}
}
}
}
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
......@@ -273,6 +452,27 @@ func acceptsGzip(r *http.Request) bool {
return acceptedEncodings["gzip"] > 0.0
}
// returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []parsedContentType, ct string) bool {
// If contentTypes is empty we handle all content types.
if len(contentTypes) == 0 {
return true
}
mediaType, params, err := mime.ParseMediaType(ct)
if err != nil {
return false
}
for _, c := range contentTypes {
if c.equals(mediaType, params) {
return true
}
}
return false
}
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
......
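Taken together, the functional options above can be combined roughly as follows. This is an illustrative sketch; the route, payload, and port are made up.

```go
package main

import (
	"compress/gzip"
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Build a wrapper from the functional options introduced in this diff.
	wrap, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.MinSize(1400),                    // same threshold as DefaultMinSize
		gziphandler.CompressionLevel(gzip.BestSpeed), // trade ratio for latency
		gziphandler.ContentTypes([]string{"application/json", "text/html"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// This tiny payload stays uncompressed because it is under MinSize.
		w.Write([]byte(`{"status":"ok"}`))
	})
	log.Fatal(http.ListenAndServe(":8080", wrap(api)))
}
```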
......@@ -6,6 +6,7 @@ go:
before_install:
- go get -t -v ./...
- go get -t -v github.com/modern-go/reflect2-tests/...
script:
- ./test.sh
......
......@@ -24,7 +24,7 @@
# go-tests = true
# unused-packages = true
ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"]
ignored = []
[[constraint]]
name = "github.com/modern-go/concurrent"
......
......@@ -150,6 +150,9 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
}
func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
if type1 == nil {
return nil
}
cacheKey := uintptr(unpackEFace(type1).data)
typeObj, found := cfg.cache.Load(cacheKey)
if found {
......
......@@ -3,7 +3,7 @@
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
......
......@@ -4,6 +4,7 @@ import (
"reflect"
"runtime"
"strings"
"sync"
"unsafe"
)
......@@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}
// initOnce guards initialization of types and packages
var initOnce sync.Once
var types map[string]reflect.Type
var packages map[string]map[string]reflect.Type
// discoverTypes initializes types and packages
func discoverTypes() {
types = make(map[string]reflect.Type)
packages = make(map[string]map[string]reflect.Type)
func init() {
ver := runtime.Version()
if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
loadGo15Types()
......@@ -90,11 +98,13 @@ type emptyInterface struct {
// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
initOnce.Do(discoverTypes)
return Type2(types[typeName])
}
// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
initOnce.Do(discoverTypes)
pkgTypes := packages[pkgPath]
if pkgTypes == nil {
return nil
......
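From the caller's side the lazy initialization behind sync.Once is invisible. A rough sketch of a lookup follows; the queried type name is only an example and may not be discoverable in every binary.

```go
package main

import (
	"fmt"
	_ "net/http" // link net/http so its named types can appear in typelinks

	"github.com/modern-go/reflect2"
)

func main() {
	// The first lookup runs discoverTypes exactly once via initOnce; later
	// calls reuse the populated maps. TypeByName only sees named types
	// compiled into this binary, so a nil result is possible.
	if t := reflect2.TypeByName("http.Client"); t != nil {
		fmt.Printf("found: %v\n", t)
	} else {
		fmt.Println("http.Client is not linked into this binary")
	}
}
```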
include $(GOROOT)/src/Make.inc
TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go
include $(GOROOT)/src/Make.pkg
format:
gofmt -w *.go
docs:
gomake clean
godoc ${TARG} > README.txt
PACKAGE
package goautoneg
import "bitbucket.org/ww/goautoneg"
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FUNCTIONS
func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.
func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses
TYPES
type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header
SUBDIRECTORIES
.hg
/*
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg
import (
"sort"
"strconv"
"strings"
)
// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
Type, SubType string
Q float64
Params map[string]string
}
// For internal use, so that we can use the sort interface
type accept_slice []Accept
func (accept accept_slice) Len() int {
slice := []Accept(accept)
return len(slice)
}
func (accept accept_slice) Less(i, j int) bool {
slice := []Accept(accept)
ai, aj := slice[i], slice[j]
if ai.Q > aj.Q {
return true
}
if ai.Type != "*" && aj.Type == "*" {
return true
}
if ai.SubType != "*" && aj.SubType == "*" {
return true
}
return false
}
func (accept accept_slice) Swap(i, j int) {
slice := []Accept(accept)
slice[i], slice[j] = slice[j], slice[i]
}
// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
parts := strings.Split(header, ",")
accept = make([]Accept, 0, len(parts))
for _, part := range parts {
part := strings.Trim(part, " ")
a := Accept{}
a.Params = make(map[string]string)
a.Q = 1.0
mrp := strings.Split(part, ";")
media_range := mrp[0]
sp := strings.Split(media_range, "/")
a.Type = strings.Trim(sp[0], " ")
switch {
case len(sp) == 1 && a.Type == "*":
a.SubType = "*"
case len(sp) == 2:
a.SubType = strings.Trim(sp[1], " ")
default:
continue
}
if len(mrp) == 1 {
accept = append(accept, a)
continue
}
for _, param := range mrp[1:] {
sp := strings.SplitN(param, "=", 2)
if len(sp) != 2 {
continue
}
token := strings.Trim(sp[0], " ")
if token == "q" {
a.Q, _ = strconv.ParseFloat(sp[1], 32)
} else {
a.Params[token] = strings.Trim(sp[1], " ")
}
}
accept = append(accept, a)
}
slice := accept_slice(accept)
sort.Sort(slice)
return
}
// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
asp := make([][]string, 0, len(alternatives))
for _, ctype := range alternatives {
asp = append(asp, strings.SplitN(ctype, "/", 2))
}
for _, clause := range ParseAccept(header) {
for i, ctsp := range asp {
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
content_type = alternatives[i]
return
}
if clause.Type == ctsp[0] && clause.SubType == "*" {
content_type = alternatives[i]
return
}
if clause.Type == "*" && clause.SubType == "*" {
content_type = alternatives[i]
return
}
}
}
return
}
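As a quick illustration of the negotiation above (the header value and alternatives are made up):

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.5, application/json"
	alternatives := []string{"application/json", "text/html"}

	// The application/json clause carries the implicit q=1.0, so it sorts
	// ahead of text/html (q=0.5) and wins the negotiation.
	fmt.Println(goautoneg.Negotiate(header, alternatives))
	// Output: application/json
}
```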
......@@ -718,7 +718,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
}
if !ut.Key.IsAssignable() {
klog.Fatalf("Hit an unsupported type %v.", uet)
klog.Fatalf("Hit an unsupported type %v for: %v", uet, t)
}
sw.Do("*out = make($.|raw$, len(*in))\n", t)
......@@ -745,6 +745,10 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
case uet.IsAssignable():
sw.Do("(*out)[key] = val\n", nil)
case uet.Kind == types.Interface:
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
if uet.Name.Name == "interface{}" {
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uet.Name.Name)
}
sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
......@@ -761,7 +765,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
case uet.Kind == types.Struct:
sw.Do("(*out)[key] = *val.DeepCopy()\n", uet)
default:
klog.Fatalf("Hit an unsupported type %v.", uet)
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
}
sw.Do("}\n", nil)
}
......@@ -793,6 +797,10 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) {
g.generateFor(ut.Elem, sw)
sw.Do("}\n", nil)
} else if uet.Kind == types.Interface {
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
if uet.Name.Name == "interface{}" {
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uet.Name.Name)
}
sw.Do("if (*in)[i] != nil {\n", nil)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
......@@ -802,7 +810,7 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) {
} else if uet.Kind == types.Struct {
sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil)
} else {
klog.Fatalf("Hit an unsupported type %v.", uet)
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
}
sw.Do("}\n", nil)
}
......@@ -863,6 +871,10 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
}
case uft.Kind == types.Interface:
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
if uft.Name.Name == "interface{}" {
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uft.Name.Name)
}
sw.Do("if in.$.name$ != nil {\n", args)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
......@@ -870,7 +882,7 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) {
sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args)
sw.Do("}\n", nil)
default:
klog.Fatalf("Hit an unsupported type %v.", uft)
klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t)
}
}
}
......@@ -907,6 +919,6 @@ func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) {
sw.Do("*out = new($.Elem|raw$)\n", ut)
sw.Do("(*in).DeepCopyInto(*out)\n", nil)
default:
klog.Fatalf("Hit an unsupported type %v.", uet)
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
}
}
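The fatal message added above asks callers to use named interfaces rather than bare interface{}. A hedged sketch of the pattern it points at (type and field names are hypothetical, not from this repository):

```go
package example

// Value is a named interface, so deepcopy-gen can emit a call to
// DeepCopyValue() instead of the impossible DeepCopyinterface{}().
type Value interface {
	DeepCopyValue() Value
}

// StringValue is a trivial implementation of Value.
type StringValue string

// DeepCopyValue returns a copy of the receiver as a Value.
func (s StringValue) DeepCopyValue() Value {
	return s
}

// Config is the kind of struct the generator can now handle: its Value field
// is copied via in.Data.DeepCopyValue() in the generated DeepCopyInto.
type Config struct {
	Data Value `json:"data,omitempty"`
}
```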
......@@ -18,7 +18,6 @@ package args
import (
"fmt"
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
......@@ -39,8 +38,6 @@ func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
// WithoutDefaultFlagParsing() disables implicit addition of command line flags and parsing,
// which allows registering custom arguments afterwards
genericArgs := args.Default().WithoutDefaultFlagParsing()
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kube-openapi/boilerplate/boilerplate.go.txt")
customArgs := &CustomArgs{}
genericArgs.CustomArgs = customArgs
......
......@@ -54,4 +54,5 @@ func main() {
); err != nil {
log.Fatalf("OpenAPI code generation error: %v", err)
}
log.Println("Code for OpenAPI definitions generated")
}
......@@ -139,7 +139,6 @@ func newAPILinter() *apiLinter {
rules: []APIRule{
&rules.NamesMatch{},
&rules.OmitEmptyMatchCase{},
&rules.ListTypeMissing{},
},
}
}
......
......@@ -171,7 +171,7 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error {
sw.Do("return map[string]$.OpenAPIDefinition|raw${\n", argsFromType(nil))
for _, t := range c.Order {
err := newOpenAPITypeWriter(sw, c).generateCall(t)
err := newOpenAPITypeWriter(sw).generateCall(t)
if err != nil {
return err
}
......@@ -186,7 +186,7 @@ func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error {
func (g *openAPIGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
klog.V(5).Infof("generating for type %v", t)
sw := generator.NewSnippetWriter(w, c, "$", "$")
err := newOpenAPITypeWriter(sw, c).generate(t)
err := newOpenAPITypeWriter(sw).generate(t)
if err != nil {
return err
}
......@@ -221,15 +221,13 @@ func shouldInlineMembers(m *types.Member) bool {
type openAPITypeWriter struct {
*generator.SnippetWriter
context *generator.Context
refTypes map[string]*types.Type
GetDefinitionInterface *types.Type
}
func newOpenAPITypeWriter(sw *generator.SnippetWriter, c *generator.Context) openAPITypeWriter {
func newOpenAPITypeWriter(sw *generator.SnippetWriter) openAPITypeWriter {
return openAPITypeWriter{
SnippetWriter: sw,
context: c,
refTypes: map[string]*types.Type{},
}
}
......@@ -338,23 +336,12 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
}
g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args)
g.generateDescription(t.CommentLines)
g.Do("Type: []string{\"object\"},\n", nil)
// write members into a temporary buffer, in order to postpone writing out the Properties field. We only do
// that if it is not empty.
propertiesBuf := bytes.Buffer{}
bsw := g
bsw.SnippetWriter = generator.NewSnippetWriter(&propertiesBuf, g.context, "$", "$")
required, err := bsw.generateMembers(t, []string{})
g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args)
required, err := g.generateMembers(t, []string{})
if err != nil {
return err
}
if propertiesBuf.Len() > 0 {
g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args)
g.Do(strings.Replace(propertiesBuf.String(), "$", "$\"$\"$", -1), nil) // escape $ (used as delimiter of the templates)
g.Do("},\n", nil)
}
g.Do("},\n", nil)
if len(required) > 0 {
g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\""))
}
......@@ -363,14 +350,13 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
return err
}
g.Do("},\n", nil)
g.Do("Dependencies: []string{\n", args)
// Map order is undefined, sort them or we may get a different file generated each time.
keys := []string{}
for k := range g.refTypes {
keys = append(keys, k)
}
sort.Strings(keys)
deps := []string{}
for _, k := range keys {
v := g.refTypes[k]
if t, _ := openapi.GetOpenAPITypeFormat(v.String()); t != "" {
......@@ -378,16 +364,9 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
// Will eliminate special case of time.Time
continue
}
deps = append(deps, k)
}
if len(deps) > 0 {
g.Do("Dependencies: []string{\n", args)
for _, k := range deps {
g.Do("\"$.$\",", k)
}
g.Do("},\n", nil)
g.Do("\"$.$\",", k)
}
g.Do("}\n}\n\n", nil)
g.Do("},\n}\n}\n\n", nil)
}
return nil
}
......@@ -397,18 +376,11 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error {
// Initially, we will only log struct extension errors.
if len(errors) > 0 {
for _, e := range errors {
klog.Errorf("[%s]: %s\n", t.String(), e)
klog.V(2).Infof("[%s]: %s\n", t.String(), e)
}
}
unions, errors := parseUnions(t)
if len(errors) > 0 {
for _, e := range errors {
klog.Errorf("[%s]: %s\n", t.String(), e)
}
}
// TODO(seans3): Validate struct extensions here.
g.emitExtensions(extensions, unions)
g.emitExtensions(extensions)
return nil
}
......@@ -423,20 +395,20 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ
klog.V(2).Infof("%s %s\n", errorPrefix, e)
}
}
g.emitExtensions(extensions, nil)
g.emitExtensions(extensions)
return nil
}
func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) {
func (g openAPITypeWriter) emitExtensions(extensions []extension) {
// If any extensions exist, then emit code to create them.
if len(extensions) == 0 && len(unions) == 0 {
if len(extensions) == 0 {
return
}
g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil)
for _, extension := range extensions {
g.Do("\"$.$\": ", extension.xName)
if extension.hasMultipleValues() {
g.Do("[]interface{}{\n", nil)
g.Do("[]string{\n", nil)
}
for _, value := range extension.values {
g.Do("\"$.$\",\n", value)
......@@ -445,13 +417,6 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union
g.Do("},\n", nil)
}
}
if len(unions) > 0 {
g.Do("\"x-kubernetes-unions\": []interface{}{\n", nil)
for _, u := range unions {
u.emit(g)
}
g.Do("},\n", nil)
}
g.Do("},\n},\n", nil)
}
......@@ -596,7 +561,7 @@ func (g openAPITypeWriter) generateMapProperty(t *types.Type) error {
return fmt.Errorf("map with non-string keys are not supported by OpenAPI in %v", t)
}
g.Do("Type: []string{\"object\"},\n", nil)
g.Do("AdditionalProperties: &spec.SchemaOrBool{\nAllows: true,\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil)
g.Do("AdditionalProperties: &spec.SchemaOrBool{\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil)
typeString, format := openapi.GetOpenAPITypeFormat(elemType.String())
if typeString != "" {
g.generateSimpleProperty(typeString, format)
......
package rules
import (
"k8s.io/gengo/types"
)
const ListTypeIDLTag = "listType"
// ListTypeMissing implements APIRule interface.
// A list type is required for inlined lists.
type ListTypeMissing struct{}
// Name returns the name of APIRule
func (l *ListTypeMissing) Name() string {
return "list_type_missing"
}
// Validate evaluates API rule on type t and returns a list of field names in
// the type that violate the rule. Empty field name [""] implies the entire
// type violates the rule.
func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) {
fields := make([]string, 0)
switch t.Kind {
case types.Struct:
for _, m := range t.Members {
if m.Type.Kind == types.Slice && types.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] == nil {
fields = append(fields, m.Name)
continue
}
}
}
return fields, nil
}
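A minimal sketch (not part of the commit above) of how the "+listType" IDL tag that ListTypeMissing checks is read. The comment lines are hypothetical, but the ExtractCommentTags call is the same one Validate uses:
package main
import (
	"fmt"
	"k8s.io/gengo/types"
)
func main() {
	// Comment lines as a parser would attach them to two slice-typed members.
	withTag := []string{"Containers is the list of containers.", "+listType=atomic"}
	withoutTag := []string{"Volumes is the list of volumes."}
	// A nil entry for "listType" is exactly what makes Validate report a field.
	fmt.Println(types.ExtractCommentTags("+", withTag)["listType"] != nil)    // true
	fmt.Println(types.ExtractCommentTags("+", withoutTag)["listType"] != nil) // false
}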
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"sort"
"k8s.io/gengo/types"
)
const tagUnionMember = "union"
const tagUnionDeprecated = "unionDeprecated"
const tagUnionDiscriminator = "unionDiscriminator"
type union struct {
discriminator string
fieldsToDiscriminated map[string]string
}
// emit prints the union, can be called on a nil union (emits nothing)
func (u *union) emit(g openAPITypeWriter) {
if u == nil {
return
}
g.Do("map[string]interface{}{\n", nil)
if u.discriminator != "" {
g.Do("\"discriminator\": \"$.$\",\n", u.discriminator)
}
g.Do("\"fields-to-discriminateBy\": map[string]interface{}{\n", nil)
keys := []string{}
for field := range u.fieldsToDiscriminated {
keys = append(keys, field)
}
sort.Strings(keys)
for _, field := range keys {
g.Do("\"$.$\": ", field)
g.Do("\"$.$\",\n", u.fieldsToDiscriminated[field])
}
g.Do("},\n", nil)
g.Do("},\n", nil)
}
// Sets the discriminator if it's not set yet; otherwise returns an error
func (u *union) setDiscriminator(value string) []error {
errors := []error{}
if u.discriminator != "" {
errors = append(errors, fmt.Errorf("at least two discriminators found: %v and %v", value, u.discriminator))
}
u.discriminator = value
return errors
}
// Add a new member to the union
func (u *union) addMember(jsonName, variableName string) {
if _, ok := u.fieldsToDiscriminated[jsonName]; ok {
panic(fmt.Errorf("same field (%v) found multiple times", jsonName))
}
u.fieldsToDiscriminated[jsonName] = variableName
}
// Makes sure that the union is valid, specifically looking for re-used discriminated values
func (u *union) isValid() []error {
errors := []error{}
// Case 1: discriminator but no fields
if u.discriminator != "" && len(u.fieldsToDiscriminated) == 0 {
errors = append(errors, fmt.Errorf("discriminator set with no fields in union"))
}
// Case 2: two fields have the same discriminated value
discriminated := map[string]struct{}{}
for _, d := range u.fieldsToDiscriminated {
if _, ok := discriminated[d]; ok {
errors = append(errors, fmt.Errorf("discriminated value is used twice: %v", d))
}
discriminated[d] = struct{}{}
}
// Case 3: a field is both discriminator AND part of the union
if u.discriminator != "" {
if _, ok := u.fieldsToDiscriminated[u.discriminator]; ok {
errors = append(errors, fmt.Errorf("%v can't be both discriminator and part of the union", u.discriminator))
}
}
return errors
}
// Find unions either directly on the members (or inlined members, not
// going across types) or on the type itself, or on embedded types.
func parseUnions(t *types.Type) ([]union, []error) {
errors := []error{}
unions := []union{}
su, err := parseUnionStruct(t)
if su != nil {
unions = append(unions, *su)
}
errors = append(errors, err...)
eu, err := parseEmbeddedUnion(t)
unions = append(unions, eu...)
errors = append(errors, err...)
mu, err := parseUnionMembers(t)
if mu != nil {
unions = append(unions, *mu)
}
errors = append(errors, err...)
return unions, errors
}
// Find unions in embedded types; unions shouldn't go across types.
func parseEmbeddedUnion(t *types.Type) ([]union, []error) {
errors := []error{}
unions := []union{}
for _, m := range t.Members {
if hasOpenAPITagValue(m.CommentLines, tagValueFalse) {
continue
}
if !shouldInlineMembers(&m) {
continue
}
u, err := parseUnions(m.Type)
unions = append(unions, u...)
errors = append(errors, err...)
}
return unions, errors
}
// Look for union tag on a struct, and then include all the fields
// (except the discriminator if there is one). The struct shouldn't have
// embedded types.
func parseUnionStruct(t *types.Type) (*union, []error) {
errors := []error{}
if types.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil {
return nil, nil
}
u := &union{fieldsToDiscriminated: map[string]string{}}
for _, m := range t.Members {
jsonName := getReferableName(&m)
if jsonName == "" {
continue
}
if shouldInlineMembers(&m) {
errors = append(errors, fmt.Errorf("union structures can't have embedded fields: %v.%v", t.Name, m.Name))
continue
}
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
errors = append(errors, fmt.Errorf("union struct can't have unionDeprecated members: %v.%v", t.Name, m.Name))
continue
}
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
errors = append(errors, u.setDiscriminator(jsonName)...)
} else {
if !hasOptionalTag(&m) {
errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name))
}
u.addMember(jsonName, m.Name)
}
}
return u, errors
}
// Find unions specifically on members.
func parseUnionMembers(t *types.Type) (*union, []error) {
errors := []error{}
u := &union{fieldsToDiscriminated: map[string]string{}}
for _, m := range t.Members {
jsonName := getReferableName(&m)
if jsonName == "" {
continue
}
if shouldInlineMembers(&m) {
continue
}
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
errors = append(errors, u.setDiscriminator(jsonName)...)
}
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil {
errors = append(errors, fmt.Errorf("union tag is not accepted on struct members: %v.%v", t.Name, m.Name))
continue
}
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
if !hasOptionalTag(&m) {
errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name))
}
u.addMember(jsonName, m.Name)
}
}
if len(u.fieldsToDiscriminated) == 0 {
return nil, nil
}
return u, append(errors, u.isValid()...)
}
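A hedged illustration (not part of the commit) of the comment tags that parseUnionStruct and parseUnionMembers look for. The type and its fields are made up, but the tag names match the constants defined at the top of this file:
package example
// StrategyExample is a hypothetical union: the discriminator records which of
// the optional members is set, and each member carries +unionDeprecated plus
// +optional, as parseUnionMembers requires.
type StrategyExample struct {
	// +unionDiscriminator
	// +optional
	Type string `json:"type,omitempty"`
	// +unionDeprecated
	// +optional
	RollingUpdate *string `json:"rollingUpdate,omitempty"`
	// +unionDeprecated
	// +optional
	Recreate *string `json:"recreate,omitempty"`
}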
......@@ -20,21 +20,24 @@ import (
"bytes"
"compress/gzip"
"crypto/sha512"
"encoding/json"
"fmt"
"mime"
"net/http"
"strings"
"sync"
"time"
"bitbucket.org/ww/goautoneg"
yaml "gopkg.in/yaml.v2"
"github.com/NYTimes/gziphandler"
"github.com/emicklei/go-restful"
restful "github.com/emicklei/go-restful"
"github.com/go-openapi/spec"
"github.com/golang/protobuf/proto"
"github.com/googleapis/gnostic/OpenAPIv2"
openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
"github.com/googleapis/gnostic/compiler"
"github.com/json-iterator/go"
"github.com/munnerz/goautoneg"
"gopkg.in/yaml.v2"
"k8s.io/kube-openapi/pkg/builder"
"k8s.io/kube-openapi/pkg/common"
......@@ -76,13 +79,66 @@ func computeETag(data []byte) string {
return fmt.Sprintf("\"%X\"", sha512.Sum512(data))
}
// NewOpenAPIService builds an OpenAPIService starting with the given spec.
func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) {
o := &OpenAPIService{}
if err := o.UpdateSpec(spec); err != nil {
// NOTE: [DEPRECATION] We will announce deprecation for format-separated endpoints for OpenAPI spec,
// and switch to a single /openapi/v2 endpoint in Kubernetes 1.10. The design doc and deprecation process
// are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU.
//
// BuildAndRegisterOpenAPIService builds the spec and registers a handler to provide access to it.
// Use this method if your OpenAPI spec is static. If you want to update the spec, use BuildOpenAPISpec then RegisterOpenAPIService.
func BuildAndRegisterOpenAPIService(servePath string, webServices []*restful.WebService, config *common.Config, handler common.PathHandler) (*OpenAPIService, error) {
spec, err := builder.BuildOpenAPISpec(webServices, config)
if err != nil {
return nil, err
}
return RegisterOpenAPIService(spec, servePath, handler)
}
// NOTE: [DEPRECATION] We will announce deprecation for format-separated endpoints for OpenAPI spec,
// and switch to a single /openapi/v2 endpoint in Kubernetes 1.10. The design doc and deprecation process
// are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU.
//
// RegisterOpenAPIService registers a handler to provide access to provided swagger spec.
// Note: servePath should end with ".json", as RegisterOpenAPIService assumes it is serving a
// JSON file and will also serve .pb and .gz files.
func RegisterOpenAPIService(openapiSpec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) {
if !strings.HasSuffix(servePath, jsonExt) {
return nil, fmt.Errorf("serving path must end with \"%s\"", jsonExt)
}
servePathBase := strings.TrimSuffix(servePath, jsonExt)
o := OpenAPIService{}
if err := o.UpdateSpec(openapiSpec); err != nil {
return nil, err
}
return o, nil
type fileInfo struct {
ext string
getDataAndETag func() ([]byte, string, time.Time)
}
files := []fileInfo{
{".json", o.getSwaggerBytes},
{"-2.0.0.json", o.getSwaggerBytes},
{"-2.0.0.pb-v1", o.getSwaggerPbBytes},
{"-2.0.0.pb-v1.gz", o.getSwaggerPbGzBytes},
}
for _, file := range files {
path := servePathBase + file.ext
getDataAndETag := file.getDataAndETag
handler.Handle(path, gziphandler.GzipHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
data, etag, lastModified := getDataAndETag()
w.Header().Set("Etag", etag)
// ServeContent will take care of caching using eTag.
http.ServeContent(w, r, path, lastModified, bytes.NewReader(data))
}),
))
}
return &o, nil
}
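A usage sketch for the registration path above, assuming the vendored import paths and that the caller already has go-restful web services plus an OpenAPI config. The mux and names are placeholders; *http.ServeMux satisfies common.PathHandler because it has the same Handle(path, handler) method:
package example
import (
	"net/http"
	restful "github.com/emicklei/go-restful"
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/handler"
)
// serveOpenAPI builds the spec for the given services and registers
// /swagger.json together with the derived -2.0.0.json, -2.0.0.pb-v1 and
// -2.0.0.pb-v1.gz variants on mux.
func serveOpenAPI(mux *http.ServeMux, services []*restful.WebService, cfg *common.Config) error {
	_, err := handler.BuildAndRegisterOpenAPIService("/swagger.json", services, cfg, mux)
	return err
}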
func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time) {
......@@ -104,15 +160,11 @@ func (o *OpenAPIService) getSwaggerPbGzBytes() ([]byte, string, time.Time) {
}
func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) {
specBytes, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(openapiSpec)
specBytes, err := json.MarshalIndent(openapiSpec, " ", " ")
if err != nil {
return err
}
var json map[string]interface{}
if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(specBytes, &json); err != nil {
return err
}
specPb, err := ToProtoBinary(json)
specPb, err := toProtoBinary(specBytes)
if err != nil {
return err
}
......@@ -138,50 +190,13 @@ func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) {
return nil
}
func jsonToYAML(j map[string]interface{}) yaml.MapSlice {
if j == nil {
return nil
}
ret := make(yaml.MapSlice, 0, len(j))
for k, v := range j {
ret = append(ret, yaml.MapItem{k, jsonToYAMLValue(v)})
}
return ret
}
func jsonToYAMLValue(j interface{}) interface{} {
switch j := j.(type) {
case map[string]interface{}:
return jsonToYAML(j)
case []interface{}:
ret := make([]interface{}, len(j))
for i := range j {
ret[i] = jsonToYAMLValue(j[i])
}
return ret
case float64:
// replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
if i64 := int64(j); j == float64(i64) {
if i := int(i64); i64 == int64(i) {
return i
}
return i64
}
if ui64 := uint64(j); j == float64(ui64) {
return ui64
}
return j
case int64:
if i := int(j); j == int64(i) {
return i
}
return j
func toProtoBinary(spec []byte) ([]byte, error) {
var info yaml.MapSlice
err := yaml.Unmarshal(spec, &info)
if err != nil {
return nil, err
}
return j
}
func ToProtoBinary(json map[string]interface{}) ([]byte, error) {
document, err := openapi_v2.NewDocument(jsonToYAML(json), compiler.NewContext("$root", nil))
document, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
if err != nil {
return nil, err
}
......@@ -197,18 +212,12 @@ func toGzip(data []byte) []byte {
}
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
//
// Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead.
func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) {
o, err := NewOpenAPIService(spec)
if err != nil {
func RegisterOpenAPIVersionedService(openapiSpec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) {
o := OpenAPIService{}
if err := o.UpdateSpec(openapiSpec); err != nil {
return nil, err
}
return o, o.RegisterOpenAPIVersionedService(servePath, handler)
}
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error {
accepted := []struct {
Type string
SubType string
......@@ -249,7 +258,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
}),
))
return nil
return &o, nil
}
// BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it.
......@@ -259,9 +268,5 @@ func BuildAndRegisterOpenAPIVersionedService(servePath string, webServices []*re
if err != nil {
return nil, err
}
o, err := NewOpenAPIService(spec)
if err != nil {
return nil, err
}
return o, o.RegisterOpenAPIVersionedService(servePath, handler)
return RegisterOpenAPIVersionedService(spec, servePath, handler)
}
......@@ -92,16 +92,13 @@ func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) {
// We believe the schema is a reference, verify that and returns a new
// Schema
func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
// TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error.
if len(s.GetProperties().GetAdditionalProperties()) > 0 {
return nil, newSchemaError(path, "unallowed embedded type definition")
}
// TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error.
if len(s.GetType().GetValue()) > 0 {
return nil, newSchemaError(path, "definition reference can't have a type")
}
// TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error.
if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
}
......@@ -130,7 +127,6 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error)
return nil, newSchemaError(path, "invalid object type")
}
var sub Schema
// TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type.
if s.GetAdditionalProperties().GetSchema() == nil {
sub = &Arbitrary{
BaseSchema: d.parseBaseSchema(s, path),
......@@ -161,7 +157,6 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema,
case Number: // do nothing
case Integer: // do nothing
case Boolean: // do nothing
// TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error.
default:
return nil, newSchemaError(path, "Unknown primitive type: %q", t)
}
......@@ -180,8 +175,6 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro
return nil, newSchemaError(path, `array should have type "array"`)
}
if len(s.GetItems().GetSchema()) != 1 {
// TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error.
// TODO(wrong): "type: array" witohut any items at all is completely valid.
return nil, newSchemaError(path, "array should have exactly one sub-item")
}
sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path)
......@@ -234,8 +227,6 @@ func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema,
// this function is public, it doesn't leak through the interface.
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) {
if s.GetXRef() != "" {
// TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflict, everything from s must be considered
// Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object
return d.parseReference(s, path)
}
objectTypes := s.GetType().GetValue()
......@@ -243,15 +234,11 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err
case 0:
// in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include
// the type:object property (they only included the "properties" property), so we need to handle this case
// TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them.
if s.GetProperties() != nil {
// TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type.
// TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given
return d.parseKind(s, path)
} else {
// Definition has no type and no properties. Treat it as an arbitrary value
// TODO(incomplete): what if it has additionalProperties=false or patternProperties?
// ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete).
// TODO: what if it has additionalProperties or patternProperties?
return d.parseArbitrary(s, path)
}
case 1:
......@@ -269,8 +256,6 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err
return d.parsePrimitive(s, path)
default:
// the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types
// TODO(wrong): this is rejecting a completely valid OpenAPI spec
// TODO(CRD validation schema publishing): filter these out
return nil, newSchemaError(path, "definitions with multiple types aren't supported")
}
}
......
......@@ -21,39 +21,14 @@ import (
"strings"
)
// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name.
// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName
// instead
//
// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name",
// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined
// in GetCanonicalTypeName means Go type names with full package path.
//
// Examples of REST friendly OpenAPI name:
// ToCanonicalName converts Golang package/type name into canonical OpenAPI name.
// Examples:
// Input: k8s.io/api/core/v1.Pod
// Output: io.k8s.api.core.v1.Pod
//
// Input: k8s.io/api/core/v1
// Output: io.k8s.api.core.v1
//
// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
func ToCanonicalName(name string) string {
return ToRESTFriendlyName(name)
}
// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name.
//
// Examples of REST friendly OpenAPI name:
// Input: k8s.io/api/core/v1.Pod
// Output: io.k8s.api.core.v1.Pod
//
// Input: k8s.io/api/core/v1
// Output: io.k8s.api.core.v1
//
// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
func ToRESTFriendlyName(name string) string {
nameParts := strings.Split(name, "/")
// Reverse first part. e.g., io.k8s... instead of k8s.io...
if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
......@@ -66,30 +41,9 @@ func ToRESTFriendlyName(name string) string {
return strings.Join(nameParts, ".")
}
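A small usage sketch matching the examples documented above; only the import path is assumed:
package main
import (
	"fmt"
	"k8s.io/kube-openapi/pkg/util"
)
func main() {
	// The leading domain is reversed and the remaining path segments are joined with dots.
	fmt.Println(util.ToCanonicalName("k8s.io/api/core/v1.Pod")) // io.k8s.api.core.v1.Pod
}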
// OpenAPICanonicalTypeNamer is an interface for models without Go type to seed model name.
//
// OpenAPI canonical names are Go type names with full package path, for uniquely identifying
// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/"
// should be used. For custom resource definition (CRD), the canonical name is expected to be
// group/version.kind
//
// Examples of canonical name:
// Go type: k8s.io/kubernetes/pkg/apis/core.Pod
// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo
//
// Example for vendored Go type:
// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod
// Canonical name: k8s.io/api/core/v1.Pod
type OpenAPICanonicalTypeNamer interface {
OpenAPICanonicalTypeName() string
}
// GetCanonicalTypeName will find the canonical type name of a sample object, removing
// the "vendor" part of the path
func GetCanonicalTypeName(model interface{}) string {
if namer, ok := model.(OpenAPICanonicalTypeNamer); ok {
return namer.OpenAPICanonicalTypeName()
}
t := reflect.TypeOf(model)
if t.Kind() == reflect.Ptr {
t = t.Elem()
......
......@@ -5,7 +5,7 @@ github.com/Azure/go-ansiterm/winterm
github.com/Azure/go-ansiterm
# github.com/Microsoft/go-winio v0.4.12
github.com/Microsoft/go-winio
# github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46
# github.com/NYTimes/gziphandler v1.1.1
github.com/NYTimes/gziphandler
# github.com/PuerkitoBio/goquery v1.5.0
github.com/PuerkitoBio/goquery
......@@ -346,10 +346,8 @@ github.com/miekg/dns
github.com/mitchellh/go-homedir
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d
github.com/munnerz/goautoneg
# github.com/naoina/go-stringutil v0.1.0
github.com/naoina/go-stringutil
# github.com/naoina/toml v0.1.1
......@@ -1043,7 +1041,7 @@ k8s.io/code-generator/cmd/client-gen/generators/fake
k8s.io/code-generator/cmd/client-gen/generators/scheme
k8s.io/code-generator/cmd/client-gen/generators/util
k8s.io/code-generator/cmd/client-gen/path
# k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6
# k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a
k8s.io/gengo/args
k8s.io/gengo/examples/deepcopy-gen/generators
k8s.io/gengo/generator
......@@ -1054,7 +1052,7 @@ k8s.io/gengo/examples/set-gen/sets
# k8s.io/klog v0.3.1
k8s.io/klog
k8s.io/klog/klogr
# k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058
# k8s.io/kube-openapi v0.0.0-20181109181836-c59034cc13d5
k8s.io/kube-openapi/cmd/openapi-gen/
k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/builder
......