mirror of https://github.com/k3s-io/k3s
Merge pull request #9971 from smarterclayton/make_conversion_more_flexible
Improve conversion to support multiple packages
commit 1a613c43fe

@@ -17,13 +17,16 @@ limitations under the License.
package main

import (
"fmt"
"io"
"os"
"path"
"runtime"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

"github.com/golang/glog"
flag "github.com/spf13/pflag"
@@ -50,7 +53,9 @@ func main() {
funcOut = file
}

generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw())
generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version))
apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource")
// TODO(wojtek-t): Change the overwrites to a flag.
generator.OverwritePackage(*version, "")
for _, knownType := range api.Scheme.KnownTypes(*version) {
@@ -58,10 +63,14 @@ func main() {
glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
}
}
generator.RepackImports(util.NewStringSet())
if err := generator.WriteImports(funcOut); err != nil {
glog.Fatalf("error while writing imports: %v", err)
}
if err := generator.WriteConversionFunctions(funcOut); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
if err := generator.RegisterConversionFunctions(funcOut); err != nil {
if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
}
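
Taken together, the new lines above change how the conversion command drives the generator: the constructor now takes the target package, AddImport returns the short name chosen for a package, and imports are repacked and written before the functions and their registration. A minimal sketch of that flow, assuming simplified flag handling and output setup (not the command's exact code):

package main

import (
	"fmt"
	"os"
	"path"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
	pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

// generate writes conversion functions for one version, following the call
// sequence introduced by this change.
func generate(version string) error {
	out := os.Stdout

	// The generator now knows which package it is writing into, so it can
	// emit short, collision-free import names.
	target := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", version)
	generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), target)

	// AddImport returns the short name picked for the package; the api short
	// name is reused when registering the generated functions below.
	apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
	generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource")
	generator.OverwritePackage(version, "")

	for _, knownType := range api.Scheme.KnownTypes(version) {
		if err := generator.GenerateConversionsForType(version, knownType); err != nil {
			return err
		}
	}

	// Repack imports once all types were visited, then write imports,
	// functions, and the init() registration block in that order.
	generator.RepackImports(util.NewStringSet())
	if err := generator.WriteImports(out); err != nil {
		return err
	}
	if err := generator.WriteConversionFunctions(out); err != nil {
		return err
	}
	return generator.RegisterConversionFunctions(out, fmt.Sprintf("%s.Scheme", apiShort))
}

func main() {
	if err := generate("v1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
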

@@ -19,12 +19,14 @@ package main
import (
"io"
"os"
"path"
"runtime"
"strings"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

"github.com/golang/glog"
flag "github.com/spf13/pflag"
@@ -53,10 +55,14 @@ func main() {
}

knownVersion := *version
registerTo := "api.Scheme"
if knownVersion == "api" {
knownVersion = api.Scheme.Raw().InternalVersion
registerTo = "Scheme"
}
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw())
pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion)
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes"))
generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")

for _, overwrite := range strings.Split(*overwrites, ",") {
vals := strings.Split(overwrite, "=")
@@ -67,13 +73,14 @@ func main() {
glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err)
}
}
if err := generator.WriteImports(funcOut, *version); err != nil {
generator.RepackImports()
if err := generator.WriteImports(funcOut); err != nil {
glog.Fatalf("error while writing imports: %v", err)
}
if err := generator.WriteDeepCopyFunctions(funcOut); err != nil {
glog.Fatalf("error while writing deep copy functions: %v", err)
}
if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil {
if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil {
glog.Fatalf("error while registering deep copy functions: %v", err)
}
}
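
The deep-copy command follows the same pattern, with one extra wrinkle visible in the hunk above: the internal "api" version registers onto a local Scheme, while versioned packages register onto api.Scheme. A condensed sketch of that selection (the helper name and surrounding wiring are illustrative assumptions, not the command's exact code):

package main

import (
	"fmt"
	"path"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
	pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

// newGeneratorFor mirrors the version handling above: "api" means the internal
// version, whose generated file lives next to Scheme itself.
func newGeneratorFor(version string) (pkg_runtime.DeepCopyGenerator, string) {
	knownVersion := version
	registerTo := "api.Scheme"
	if knownVersion == "api" {
		knownVersion = api.Scheme.Raw().InternalVersion
		registerTo = "Scheme"
	}
	pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion)
	generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath,
		util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes"))
	generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
	return generator, registerTo
}

func main() {
	_, registerTo := newGeneratorFor("v1")
	fmt.Println("deep-copy functions will be registered on", registerTo)
}
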

@@ -33,14 +33,6 @@ function generate_version() {
cat >> $TMPFILE <<EOF
package ${version}

import (
"reflect"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
)

// AUTO-GENERATED FUNCTIONS START HERE
EOF

@@ -18,14 +18,14 @@ package api

// AUTO-GENERATED FUNCTIONS START HERE
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"speter.net/go/exp/math/dec/inf"
"time"
resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
fields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
labels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
util "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
inf "speter.net/go/exp/math/dec/inf"
time "time"
)

func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
@@ -587,7 +587,7 @@ func deepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Clone
func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
out.Type = in.Type
if in.Max != nil {
out.Max = make(map[ResourceName]resource.Quantity)
out.Max = make(ResourceList)
for key, val := range in.Max {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -599,7 +599,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv
out.Max = nil
}
if in.Min != nil {
out.Min = make(map[ResourceName]resource.Quantity)
out.Min = make(ResourceList)
for key, val := range in.Min {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -611,7 +611,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv
out.Min = nil
}
if in.Default != nil {
out.Default = make(map[ResourceName]resource.Quantity)
out.Default = make(ResourceList)
for key, val := range in.Default {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -857,7 +857,7 @@ func deepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) err

func deepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1041,7 +1041,7 @@ func deepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, ou
out.AccessModes = nil
}
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1143,7 +1143,7 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist

func deepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1571,7 +1571,7 @@ func deepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList

func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1587,7 +1587,7 @@ func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec

func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1599,7 +1599,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota
out.Hard = nil
}
if in.Used != nil {
out.Used = make(map[ResourceName]resource.Quantity)
out.Used = make(ResourceList)
for key, val := range in.Used {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1615,7 +1615,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota

func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
if in.Limits != nil {
out.Limits = make(map[ResourceName]resource.Quantity)
out.Limits = make(ResourceList)
for key, val := range in.Limits {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1627,7 +1627,7 @@ func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceReq
out.Limits = nil
}
if in.Requests != nil {
out.Requests = make(map[ResourceName]resource.Quantity)
out.Requests = make(ResourceList)
for key, val := range in.Requests {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
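
The only behavioral content in the generated-file churn above is cosmetic: map-valued fields are now allocated through the named type. Assuming the usual definition of ResourceList in pkg/api (a map from ResourceName to resource.Quantity), the two forms build identical maps, as this small check illustrates:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
)

func main() {
	// make(api.ResourceList) and make(map[api.ResourceName]resource.Quantity)
	// allocate the same underlying map type; the generator now prints the
	// shorter named form.
	byName := make(api.ResourceList)
	byMap := make(map[api.ResourceName]resource.Quantity)
	byName[api.ResourceCPU] = resource.MustParse("100m")
	byMap[api.ResourceCPU] = resource.MustParse("100m")
	fmt.Println(byName[api.ResourceCPU].String() == byMap[api.ResourceCPU].String())
}
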

@@ -16,15 +16,14 @@ limitations under the License.

package v1

// AUTO-GENERATED FUNCTIONS START HERE
import (
"reflect"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
api "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
reflect "reflect"
)

// AUTO-GENERATED FUNCTIONS START HERE
func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in)
@@ -692,7 +691,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
}
out.Type = LimitType(in.Type)
if in.Max != nil {
out.Max = make(map[ResourceName]resource.Quantity)
out.Max = make(ResourceList)
for key, val := range in.Max {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -704,7 +703,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
out.Max = nil
}
if in.Min != nil {
out.Min = make(map[ResourceName]resource.Quantity)
out.Min = make(ResourceList)
for key, val := range in.Min {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -716,7 +715,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
out.Min = nil
}
if in.Default != nil {
out.Default = make(map[ResourceName]resource.Quantity)
out.Default = make(ResourceList)
for key, val := range in.Default {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1006,7 +1005,7 @@ func convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus
defaulting.(func(*api.NodeStatus))(in)
}
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1216,7 +1215,7 @@ func convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(i
out.AccessModes = nil
}
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1330,7 +1329,7 @@ func convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Persist
defaulting.(func(*api.PersistentVolumeSpec))(in)
}
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1735,7 +1734,7 @@ func convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuota
defaulting.(func(*api.ResourceQuotaSpec))(in)
}
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1754,7 +1753,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ
defaulting.(func(*api.ResourceQuotaStatus))(in)
}
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1766,7 +1765,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ
out.Hard = nil
}
if in.Used != nil {
out.Used = make(map[ResourceName]resource.Quantity)
out.Used = make(ResourceList)
for key, val := range in.Used {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1785,7 +1784,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc
defaulting.(func(*api.ResourceRequirements))(in)
}
if in.Limits != nil {
out.Limits = make(map[ResourceName]resource.Quantity)
out.Limits = make(ResourceList)
for key, val := range in.Limits {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1797,7 +1796,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc
out.Limits = nil
}
if in.Requests != nil {
out.Requests = make(map[ResourceName]resource.Quantity)
out.Requests = make(ResourceList)
for key, val := range in.Requests {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2942,7 +2941,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
}
out.Type = api.LimitType(in.Type)
if in.Max != nil {
out.Max = make(map[api.ResourceName]resource.Quantity)
out.Max = make(api.ResourceList)
for key, val := range in.Max {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2954,7 +2953,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
out.Max = nil
}
if in.Min != nil {
out.Min = make(map[api.ResourceName]resource.Quantity)
out.Min = make(api.ResourceList)
for key, val := range in.Min {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2966,7 +2965,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
out.Min = nil
}
if in.Default != nil {
out.Default = make(map[api.ResourceName]resource.Quantity)
out.Default = make(api.ResourceList)
for key, val := range in.Default {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3256,7 +3255,7 @@ func convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus
defaulting.(func(*NodeStatus))(in)
}
if in.Capacity != nil {
out.Capacity = make(map[api.ResourceName]resource.Quantity)
out.Capacity = make(api.ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3466,7 +3465,7 @@ func convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(i
out.AccessModes = nil
}
if in.Capacity != nil {
out.Capacity = make(map[api.ResourceName]resource.Quantity)
out.Capacity = make(api.ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3580,7 +3579,7 @@ func convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentV
defaulting.(func(*PersistentVolumeSpec))(in)
}
if in.Capacity != nil {
out.Capacity = make(map[api.ResourceName]resource.Quantity)
out.Capacity = make(api.ResourceList)
for key, val := range in.Capacity {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3985,7 +3984,7 @@ func convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec
defaulting.(func(*ResourceQuotaSpec))(in)
}
if in.Hard != nil {
out.Hard = make(map[api.ResourceName]resource.Quantity)
out.Hard = make(api.ResourceList)
for key, val := range in.Hard {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4004,7 +4003,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota
defaulting.(func(*ResourceQuotaStatus))(in)
}
if in.Hard != nil {
out.Hard = make(map[api.ResourceName]resource.Quantity)
out.Hard = make(api.ResourceList)
for key, val := range in.Hard {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4016,7 +4015,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota
out.Hard = nil
}
if in.Used != nil {
out.Used = make(map[api.ResourceName]resource.Quantity)
out.Used = make(api.ResourceList)
for key, val := range in.Used {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4035,7 +4034,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq
defaulting.(func(*ResourceRequirements))(in)
}
if in.Limits != nil {
out.Limits = make(map[api.ResourceName]resource.Quantity)
out.Limits = make(api.ResourceList)
for key, val := range in.Limits {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4047,7 +4046,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq
out.Limits = nil
}
if in.Requests != nil {
out.Requests = make(map[api.ResourceName]resource.Quantity)
out.Requests = make(api.ResourceList)
for key, val := range in.Requests {
newVal := resource.Quantity{}
if err := s.Convert(&val, &newVal, 0); err != nil {

@@ -18,13 +18,13 @@ package v1

// AUTO-GENERATED FUNCTIONS START HERE
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"speter.net/go/exp/math/dec/inf"
"time"
api "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
util "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
inf "speter.net/go/exp/math/dec/inf"
time "time"
)

func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
@@ -600,7 +600,7 @@ func deepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner
func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
out.Type = in.Type
if in.Max != nil {
out.Max = make(map[ResourceName]resource.Quantity)
out.Max = make(ResourceList)
for key, val := range in.Max {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -612,7 +612,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve
out.Max = nil
}
if in.Min != nil {
out.Min = make(map[ResourceName]resource.Quantity)
out.Min = make(ResourceList)
for key, val := range in.Min {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -624,7 +624,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve
out.Min = nil
}
if in.Default != nil {
out.Default = make(map[ResourceName]resource.Quantity)
out.Default = make(ResourceList)
for key, val := range in.Default {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -860,7 +860,7 @@ func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) erro

func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1044,7 +1044,7 @@ func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out
out.AccessModes = nil
}
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1146,7 +1146,7 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste

func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
if in.Capacity != nil {
out.Capacity = make(map[ResourceName]resource.Quantity)
out.Capacity = make(ResourceList)
for key, val := range in.Capacity {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1580,7 +1580,7 @@ func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList,

func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1596,7 +1596,7 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec,

func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
if in.Hard != nil {
out.Hard = make(map[ResourceName]resource.Quantity)
out.Hard = make(ResourceList)
for key, val := range in.Hard {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1608,7 +1608,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS
out.Hard = nil
}
if in.Used != nil {
out.Used = make(map[ResourceName]resource.Quantity)
out.Used = make(ResourceList)
for key, val := range in.Used {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1624,7 +1624,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS

func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
if in.Limits != nil {
out.Limits = make(map[ResourceName]resource.Quantity)
out.Limits = make(ResourceList)
for key, val := range in.Limits {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1636,7 +1636,7 @@ func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequ
out.Limits = nil
}
if in.Requests != nil {
out.Requests = make(map[ResourceName]resource.Quantity)
out.Requests = make(ResourceList)
for key, val := range in.Requests {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
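
A side effect of the new WriteImports path, visible in all three generated files above, is that every import is written with an explicit short name, even when that name matches the package's own name. The style it emits is ordinary Go; a minimal file in that style:

package main

// Written in the aliased style the generator now emits; an alias that equals
// the package name is legal and has no effect on resolution.
import (
	fmt "fmt"
	time "time"
)

func main() {
	fmt.Println(time.Now().Year() > 0)
}
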

@@ -19,40 +19,69 @@ package runtime
import (
"fmt"
"io"
"path"
"reflect"
"sort"
"strings"

"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

type ConversionGenerator interface {
GenerateConversionsForType(version string, reflection reflect.Type) error
WriteConversionFunctions(w io.Writer) error
RegisterConversionFunctions(w io.Writer) error
RegisterConversionFunctions(w io.Writer, pkg string) error
AddImport(pkg string) string
RepackImports(exclude util.StringSet)
WriteImports(w io.Writer) error
OverwritePackage(pkg, overwrite string)
AssumePrivateConversions()
}

func NewConversionGenerator(scheme *conversion.Scheme) ConversionGenerator {
return &conversionGenerator{
func NewConversionGenerator(scheme *conversion.Scheme, targetPkg string) ConversionGenerator {
g := &conversionGenerator{
scheme: scheme,
targetPkg: targetPkg,
convertibles: make(map[reflect.Type]reflect.Type),
pkgOverwrites: make(map[string]string),
imports: make(map[string]string),
shortImports: make(map[string]string),
}
g.targetPackage(targetPkg)
g.AddImport("reflect")
g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion")
return g
}

var complexTypes []reflect.Kind = []reflect.Kind{reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct}

type conversionGenerator struct {
scheme *conversion.Scheme
targetPkg string
convertibles map[reflect.Type]reflect.Type
// If pkgOverwrites is set for a given package name, that package name
// will be replaced while writing conversion function. If empty, package
// name will be omitted.
pkgOverwrites map[string]string
// map of package names to shortname
imports map[string]string
// map of short names to package names
shortImports map[string]string

// A buffer that is used for storing lines that needs to be written.
linesToPrint []string

// if true, we assume conversions on the scheme are not available to us in the current package
assumePrivateConversions bool
}

func (g *conversionGenerator) AssumePrivateConversions() {
g.assumePrivateConversions = true
}

func (g *conversionGenerator) AddImport(pkg string) string {
return g.addImportByPath(pkg)
}

func (g *conversionGenerator) GenerateConversionsForType(version string, reflection reflect.Type) error {
@@ -94,6 +123,10 @@ func (g *conversionGenerator) generateConversionsBetween(inType, outType reflect
}
return fmt.Errorf("cannot convert types of different kinds: %v %v", inType, outType)
}

g.addImportByPath(inType.PkgPath())
g.addImportByPath(outType.PkgPath())

// We should be able to generate conversions both sides.
switch inType.Kind() {
case reflect.Map:
@@ -155,11 +188,15 @@ func isComplexType(reflection reflect.Type) bool {
func (g *conversionGenerator) generateConversionsForMap(inType, outType reflect.Type) error {
inKey := inType.Key()
outKey := outType.Key()
g.addImportByPath(inKey.PkgPath())
g.addImportByPath(outKey.PkgPath())
if err := g.generateConversionsBetween(inKey, outKey); err != nil {
return err
}
inValue := inType.Elem()
outValue := outType.Elem()
g.addImportByPath(inKey.PkgPath())
g.addImportByPath(outKey.PkgPath())
if err := g.generateConversionsBetween(inValue, outValue); err != nil {
return err
}
@@ -238,6 +275,54 @@ func (s byName) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

func (g *conversionGenerator) targetPackage(pkg string) {
g.imports[pkg] = ""
g.shortImports[""] = pkg
}

func (g *conversionGenerator) RepackImports(exclude util.StringSet) {
var packages []string
for key := range g.imports {
packages = append(packages, key)
}
sort.Strings(packages)
g.imports = make(map[string]string)
g.shortImports = make(map[string]string)
g.targetPackage(g.targetPkg)
for _, pkg := range packages {
if !exclude.Has(pkg) {
g.addImportByPath(pkg)
}
}
}

func (g *conversionGenerator) WriteImports(w io.Writer) error {
var packages []string
for key := range g.imports {
packages = append(packages, key)
}
sort.Strings(packages)

buffer := newBuffer()
indent := 0
buffer.addLine("import (\n", indent)
for _, importPkg := range packages {
if len(importPkg) == 0 {
continue
}
if len(g.imports[importPkg]) == 0 {
continue
}
buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1)
}
buffer.addLine(")\n", indent)
buffer.addLine("\n", indent)
if err := buffer.flushLines(w); err != nil {
return err
}
return nil
}

func (g *conversionGenerator) WriteConversionFunctions(w io.Writer) error {
// It's desired to print conversion functions always in the same order
// (e.g. for better tracking of what has really been added).
@@ -265,9 +350,9 @@ func (g *conversionGenerator) WriteConversionFunctions(w io.Writer) error {
return nil
}

func (g *conversionGenerator) writeRegisterHeader(b *buffer, indent int) {
func (g *conversionGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) {
b.addLine("func init() {\n", indent)
b.addLine("err := api.Scheme.AddGeneratedConversionFuncs(\n", indent+1)
b.addLine(fmt.Sprintf("err := %s.AddGeneratedConversionFuncs(\n", pkg), indent+1)
}

func (g *conversionGenerator) writeRegisterFooter(b *buffer, indent int) {
@@ -280,7 +365,7 @@ func (g *conversionGenerator) writeRegisterFooter(b *buffer, indent int) {
b.addLine("\n", indent)
}

func (g *conversionGenerator) RegisterConversionFunctions(w io.Writer) error {
func (g *conversionGenerator) RegisterConversionFunctions(w io.Writer, pkg string) error {
// Write conversion function names alphabetically ordered.
var names []string
for inType, outType := range g.convertibles {
@@ -290,7 +375,7 @@ func (g *conversionGenerator) RegisterConversionFunctions(w io.Writer) error {

buffer := newBuffer()
indent := 0
g.writeRegisterHeader(buffer, indent)
g.writeRegisterHeader(buffer, pkg, indent)
for _, name := range names {
buffer.addLine(fmt.Sprintf("%s,\n", name), indent+2)
}
@@ -301,32 +386,74 @@ func (g *conversionGenerator) RegisterConversionFunctions(w io.Writer) error {
return nil
}

func (g *conversionGenerator) addImportByPath(pkg string) string {
if name, ok := g.imports[pkg]; ok {
return name
}
name := path.Base(pkg)
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 {
name = dirname + name
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 {
name = subdirname + name
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
}
}
for i := 2; i < 100; i++ {
generatedName := fmt.Sprintf("%s%d", name, i)
if _, ok := g.shortImports[generatedName]; !ok {
g.imports[pkg] = generatedName
g.shortImports[generatedName] = pkg
return generatedName
}
}
panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports))
}

func (g *conversionGenerator) typeName(inType reflect.Type) string {
switch inType.Kind() {
case reflect.Map:
return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem()))
case reflect.Slice:
return fmt.Sprintf("[]%s", g.typeName(inType.Elem()))
case reflect.Ptr:
return fmt.Sprintf("*%s", g.typeName(inType.Elem()))
case reflect.Map:
if len(inType.Name()) == 0 {
return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem()))
}
fallthrough
default:
typeWithPkg := fmt.Sprintf("%s", inType)
slices := strings.Split(typeWithPkg, ".")
if len(slices) == 1 {
pkg, name := inType.PkgPath(), inType.Name()
if len(name) == 0 && inType.Kind() == reflect.Struct {
return "struct{}"
}
if len(pkg) == 0 {
// Default package.
return slices[0]
return name
}
if len(slices) == 2 {
pkg := slices[0]
if val, found := g.pkgOverwrites[pkg]; found {
pkg = val
}
if pkg != "" {
pkg = pkg + "."
}
return pkg + slices[1]
if val, found := g.pkgOverwrites[pkg]; found {
pkg = val
}
panic("Incorrect type name: " + typeWithPkg)
if len(pkg) == 0 {
return name
}
short := g.addImportByPath(pkg)
if len(short) > 0 {
return fmt.Sprintf("%s.%s", short, name)
}
return name
}
}
@@ -658,6 +785,10 @@ func (g *conversionGenerator) existsDedicatedConversionFunction(inType, outType
// unnamed. Thus we return false here.
return false
}
// TODO: no way to handle private conversions in different packages
if g.assumePrivateConversions {
return false
}
return g.scheme.Converter().HasConversionFunc(inType, outType)
}
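
The addImportByPath logic above (shared by both generators) picks short names with a simple escalation: the package's base name, then the parent directory prepended, and finally a numeric suffix (the real code also tries the grandparent directory before falling back to numbers). A standalone sketch of that strategy, reimplemented here purely for illustration:

package main

import (
	"fmt"
	"path"
)

// shortName mimics the escalation used by addImportByPath: base name, then
// dir+base, then a numeric suffix once both are taken.
func shortName(pkg string, taken map[string]bool) string {
	name := path.Base(pkg)
	if !taken[name] {
		taken[name] = true
		return name
	}
	if dir := path.Base(path.Dir(pkg)); dir != "" && dir != "." {
		if !taken[dir+name] {
			taken[dir+name] = true
			return dir + name
		}
	}
	for i := 2; i < 100; i++ {
		candidate := fmt.Sprintf("%s%d", name, i)
		if !taken[candidate] {
			taken[candidate] = true
			return candidate
		}
	}
	panic("no unique short name for " + pkg)
}

func main() {
	taken := map[string]bool{}
	fmt.Println(shortName("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource", taken)) // "resource"
	fmt.Println(shortName("example.com/other/resource", taken))                                 // "otherresource"
	fmt.Println(shortName("example.com/more/other/resource", taken))                            // "resource2"
}
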

@@ -19,6 +19,7 @@ package runtime
import (
"fmt"
"io"
"path"
"reflect"
"sort"
"strings"
@@ -38,9 +39,20 @@ type DeepCopyGenerator interface {
// functions for this type and all nested types will be generated.
AddType(inType reflect.Type) error

// ReplaceType registers a type that should be used instead of the type
// with the provided pkgPath and name.
ReplaceType(pkgPath, name string, in interface{})

// AddImport registers a package name with the generator and returns its
// short name.
AddImport(pkgPath string) string

// RepackImports creates a stable ordering of import short names
RepackImports()

// Writes all imports that are necessary for deep-copy function and
// their registration.
WriteImports(w io.Writer, pkg string) error
WriteImports(w io.Writer) error

// Writes deel-copy functions for all types added via AddType() method
// and their nested types.
@@ -57,20 +69,80 @@ type DeepCopyGenerator interface {
OverwritePackage(pkg, overwrite string)
}

func NewDeepCopyGenerator(scheme *conversion.Scheme) DeepCopyGenerator {
return &deepCopyGenerator{
func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator {
g := &deepCopyGenerator{
scheme: scheme,
targetPkg: targetPkg,
copyables: make(map[reflect.Type]bool),
imports: util.StringSet{},
imports: make(map[string]string),
shortImports: make(map[string]string),
pkgOverwrites: make(map[string]string),
replace: make(map[pkgPathNamePair]reflect.Type),
include: include,
}
g.targetPackage(targetPkg)
g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion")
return g
}

type pkgPathNamePair struct {
PkgPath string
Name string
}

type deepCopyGenerator struct {
scheme *conversion.Scheme
copyables map[reflect.Type]bool
imports util.StringSet
scheme *conversion.Scheme
targetPkg string
copyables map[reflect.Type]bool
// map of package names to shortname
imports map[string]string
// map of short names to package names
shortImports map[string]string
pkgOverwrites map[string]string
replace map[pkgPathNamePair]reflect.Type
include util.StringSet
}

func (g *deepCopyGenerator) addImportByPath(pkg string) string {
if name, ok := g.imports[pkg]; ok {
return name
}
name := path.Base(pkg)
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 {
name = dirname + name
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 {
name = subdirname + name
if _, ok := g.shortImports[name]; !ok {
g.imports[pkg] = name
g.shortImports[name] = pkg
return name
}
}
}
for i := 2; i < 100; i++ {
generatedName := fmt.Sprintf("%s%d", name, i)
if _, ok := g.shortImports[generatedName]; !ok {
g.imports[pkg] = generatedName
g.shortImports[generatedName] = pkg
return generatedName
}
}
panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports))
}

func (g *deepCopyGenerator) targetPackage(pkg string) {
g.imports[pkg] = ""
g.shortImports[""] = pkg
}

func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error {
@@ -90,11 +162,18 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error {
return err
}
case reflect.Interface:
g.imports.Insert(inType.PkgPath())
g.addImportByPath(inType.PkgPath())
return nil
case reflect.Struct:
g.imports.Insert(inType.PkgPath())
if !strings.HasPrefix(inType.PkgPath(), "github.com/GoogleCloudPlatform/kubernetes") {
g.addImportByPath(inType.PkgPath())
found := false
for s := range g.include {
if strings.HasPrefix(inType.PkgPath(), s) {
found = true
break
}
}
if !found {
return nil
}
for i := 0; i < inType.NumField(); i++ {
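
The hunk above is where the new include set matters: struct types always get an import, but the generator only recurses into fields of types whose package path starts with one of the include prefixes (the command passes "github.com/GoogleCloudPlatform/kubernetes"). The gate reduces to a prefix check like this standalone illustration (not the generator's exact method):

package main

import (
	"fmt"
	"strings"
)

// shouldRecurse reports whether deep-copy generation should descend into a
// type from pkgPath, given the configured include prefixes.
func shouldRecurse(pkgPath string, include []string) bool {
	for _, prefix := range include {
		if strings.HasPrefix(pkgPath, prefix) {
			return true
		}
	}
	return false
}

func main() {
	include := []string{"github.com/GoogleCloudPlatform/kubernetes"}
	fmt.Println(shouldRecurse("github.com/GoogleCloudPlatform/kubernetes/pkg/api", include)) // true
	fmt.Println(shouldRecurse("speter.net/go/exp/math/dec/inf", include))                    // false
}
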

@@ -110,6 +189,15 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error {
return nil
}

func (g *deepCopyGenerator) AddImport(pkg string) string {
return g.addImportByPath(pkg)
}

// ReplaceType registers a replacement type to be used instead of the named type
func (g *deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) {
g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t)
}

func (g *deepCopyGenerator) AddType(inType reflect.Type) error {
if inType.Kind() != reflect.Struct {
return fmt.Errorf("non-struct copies are not supported")
@@ -117,10 +205,23 @@ func (g *deepCopyGenerator) AddType(inType reflect.Type) error {
return g.addAllRecursiveTypes(inType)
}

func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error {
func (g *deepCopyGenerator) RepackImports() {
var packages []string
for key := range g.imports {
packages = append(packages, key)
}
sort.Strings(packages)
g.imports = make(map[string]string)
g.shortImports = make(map[string]string)

g.targetPackage(g.targetPkg)
for _, pkg := range packages {
g.addImportByPath(pkg)
}
}

func (g *deepCopyGenerator) WriteImports(w io.Writer) error {
var packages []string
packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/api")
packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion")
for key := range g.imports {
packages = append(packages, key)
}
@@ -130,10 +231,13 @@ func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error {
indent := 0
buffer.addLine("import (\n", indent)
for _, importPkg := range packages {
if strings.HasSuffix(importPkg, pkg) {
if len(importPkg) == 0 {
continue
}
buffer.addLine(fmt.Sprintf("\"%s\"\n", importPkg), indent+1)
if len(g.imports[importPkg]) == 0 {
continue
}
buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1)
}
buffer.addLine(")\n", indent)
buffer.addLine("\n", indent)
@@ -159,35 +263,47 @@ func (s byPkgAndName) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

func (g *deepCopyGenerator) typeName(inType reflect.Type) string {
func (g *deepCopyGenerator) nameForType(inType reflect.Type) string {
switch inType.Kind() {
case reflect.Map:
return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem()))
case reflect.Slice:
return fmt.Sprintf("[]%s", g.typeName(inType.Elem()))
case reflect.Ptr:
return fmt.Sprintf("*%s", g.typeName(inType.Elem()))
case reflect.Map:
if len(inType.Name()) == 0 {
return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem()))
}
fallthrough
default:
typeWithPkg := fmt.Sprintf("%s", inType)
slices := strings.Split(typeWithPkg, ".")
if len(slices) == 1 {
pkg, name := inType.PkgPath(), inType.Name()
if len(name) == 0 && inType.Kind() == reflect.Struct {
return "struct{}"
}
if len(pkg) == 0 {
// Default package.
return slices[0]
return name
}
if len(slices) == 2 {
pkg := slices[0]
if val, found := g.pkgOverwrites[pkg]; found {
pkg = val
}
if pkg != "" {
pkg = pkg + "."
}
return pkg + slices[1]
if val, found := g.pkgOverwrites[pkg]; found {
pkg = val
}
panic("Incorrect type name: " + typeWithPkg)
if len(pkg) == 0 {
return name
}
short := g.addImportByPath(pkg)
if len(short) > 0 {
return fmt.Sprintf("%s.%s", short, name)
}
return name
}
}

func (g *deepCopyGenerator) typeName(inType reflect.Type) string {
if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok {
return g.nameForType(t)
}
return g.nameForType(inType)
}

func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string {
funcNameFormat := "deepCopy_%s_%s"
inPkg := packageForName(inType)
@@ -442,12 +558,8 @@ func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type,

func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) {
b.addLine("func init() {\n", indent)
registerFormat := "err := %sScheme.AddGeneratedDeepCopyFuncs(\n"
if pkg == "api" {
b.addLine(fmt.Sprintf(registerFormat, ""), indent+1)
} else {
b.addLine(fmt.Sprintf(registerFormat, "api."), indent+1)
}
registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n"
b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1)
}

func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) {