diff --git a/README.md b/README.md
index 07bb49a96..a89750bb3 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,31 @@ Please use [High Availability (HA) Install](https://rancher.com/docs/rancher/v2.
 RKE can be built using the `make` command, and will use the scripts in the `scripts` directory as subcommands. The default subcommand is `ci` and will use `scripts/ci`. Cross compiling can be enabled by setting the environment variable `CROSS=1`. The compiled binaries can be found in the `build/bin` directory. Dependencies are managed by Go modules and can be found in [go.mod](https://github.com/rancher/rke/blob/master/go.mod).
 
+RKE now fetches `data.json` from https://github.com/rancher/kontainer-driver-metadata. To fetch data.json and compile it into RKE, run:
+
+```bash
+go generate
+
+# Set RANCHER_METADATA_URL to an external URL instead of the default https://releases.rancher.com/kontainer-driver-metadata/dev-v2.4/data.json
+RANCHER_METADATA_URL=${URL} go generate
+
+# Or load it from a local file
+RANCHER_METADATA_URL=./local/data.json go generate
+
+# Compile RKE
+make
+```
+
+To specify RANCHER_METADATA_URL at runtime, set the environment variable when running the rke CLI. For example:
+
+```bash
+RANCHER_METADATA_URL=${URL} rke [commands] [options]
+
+RANCHER_METADATA_URL=./local/data.json rke [commands] [options]
+```
+
+`RANCHER_METADATA_URL` defaults to `https://releases.rancher.com/kontainer-driver-metadata/dev-v2.4/data.json`.
+
 ## License
 
 Copyright (c) 2019 [Rancher Labs, Inc.](http://rancher.com)
diff --git a/cluster/addons.go b/cluster/addons.go
index 2af3bc22e..ec37fa812 100644
--- a/cluster/addons.go
+++ b/cluster/addons.go
@@ -12,7 +12,6 @@ import (
 	"strings"
 	"time"
 
-	rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates"
 	"github.com/rancher/rke/addons"
 	"github.com/rancher/rke/authz"
 	"github.com/rancher/rke/k8s"
@@ -20,7 +19,8 @@ import (
 	"github.com/rancher/rke/services"
 	"github.com/rancher/rke/templates"
 	"github.com/rancher/rke/util"
-	"github.com/rancher/types/apis/management.cattle.io/v3"
+	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
+	"github.com/rancher/types/kdm"
 	"github.com/sirupsen/logrus"
 	"gopkg.in/yaml.v2"
 	appsv1 "k8s.io/api/apps/v1"
@@ -285,7 +285,7 @@ func (c *Cluster) deployKubeDNS(ctx context.Context, data map[string]interface{}
 		return err
 	}
 	KubeDNSConfig.LinearAutoscalerParams = string(linearModeBytes)
-	tmplt, err := templates.GetVersionedTemplates(rkeData.KubeDNS, data, c.Version)
+	tmplt, err := templates.GetVersionedTemplates(kdm.KubeDNS, data, c.Version)
 	if err != nil {
 		return err
 	}
@@ -318,7 +318,7 @@ func (c *Cluster) deployCoreDNS(ctx context.Context, data map[string]interface{}
 		return err
 	}
 	CoreDNSConfig.LinearAutoscalerParams = string(linearModeBytes)
-	tmplt, err := templates.GetVersionedTemplates(rkeData.CoreDNS, data, c.Version)
+	tmplt, err := templates.GetVersionedTemplates(kdm.CoreDNS, data, c.Version)
 	if err != nil {
 		return err
 	}
@@ -364,7 +364,7 @@ func (c *Cluster) deployMetricServer(ctx context.Context, data map[string]interf
 		UpdateStrategy: c.Monitoring.UpdateStrategy,
 		Replicas:       c.Monitoring.Replicas,
 	}
-	tmplt, err := templates.GetVersionedTemplates(rkeData.MetricsServer, data, c.Version)
+	tmplt, err := templates.GetVersionedTemplates(kdm.MetricsServer, data, c.Version)
 	if err != nil {
 		return err
 	}
@@ -531,7 +531,7 @@ func (c *Cluster) deployIngress(ctx context.Context, data map[string]interface{}
 			ingressConfig.AlpineImage = c.SystemImages.Alpine
 		}
 	}
-	tmplt, err := templates.GetVersionedTemplates(rkeData.NginxIngress, data, c.Version)
+	tmplt, err := templates.GetVersionedTemplates(kdm.NginxIngress, data, c.Version)
 	if err != nil {
 		return err
 	}
diff --git a/cluster/cluster.go b/cluster/cluster.go
index 32bd4451b..bfa76ac3a 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -556,7 +556,9 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
 		},
 	}
 	if metadata.K8sVersionToRKESystemImages == nil {
-		metadata.InitMetadata(ctx)
+		if err := metadata.InitMetadata(ctx); err != nil {
+			return nil, err
+		}
 	}
 	if len(c.ConfigPath) == 0 {
 		c.ConfigPath = pki.ClusterConfig
diff --git a/cluster/encryption.go b/cluster/encryption.go
index 93d756db9..305dafb77 100644
--- a/cluster/encryption.go
+++ b/cluster/encryption.go
@@ -381,9 +381,6 @@ func resolveCustomEncryptionConfig(clusterFile string) (string, *apiserverconfig
 	if err != nil {
 		return clusterFile, nil, fmt.Errorf("error unmarshalling: %v", err)
 	}
-	if err != nil {
-		return "", nil, fmt.Errorf("error unmarshalling encryption custom config: %v", err)
-	}
 	services, ok := r["services"].(map[string]interface{})
 	if services == nil || !ok {
 		return clusterFile, nil, nil
diff --git a/codegen/codegen.go b/codegen/codegen.go
new file mode 100644
index 000000000..16e0c581d
--- /dev/null
+++ b/codegen/codegen.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/rancher/rke/metadata"
+)
+
+const (
+	defaultURL = "https://releases.rancher.com/kontainer-driver-metadata/dev-v2.4/data.json"
+	dataFile   = "data/data.json"
+)
+
+// Codegen fetches data.json from https://releases.rancher.com/kontainer-driver-metadata/dev-v2.4/data.json and generates bindata
+func main() {
+	u := os.Getenv(metadata.RancherMetadataURLEnv)
+	if u == "" {
+		u = defaultURL
+	}
+	data, err := http.Get(u)
+	if err != nil {
+		panic(fmt.Errorf("failed to fetch data.json from kontainer-driver-metadata repository: %v", err))
+	}
+	defer data.Body.Close()
+
+	b, err := ioutil.ReadAll(data.Body)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println("Writing data")
+	if err := ioutil.WriteFile(dataFile, b, 0755); err != nil {
+		panic(err)
+	}
+}
diff --git a/data/bindata.go b/data/bindata.go
new file mode 100644
index 000000000..3d3b8c1ce
--- /dev/null
+++ b/data/bindata.go
@@ -0,0 +1,246 @@
+// Code generated for package data by go-bindata DO NOT EDIT. (@generated)
+// sources:
+// data/data.json
+package data
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+// Name return file name
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+
+// Size return file size
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+
+// Mode return file mode
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+
+// Mode return file modify time
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+
+// IsDir return file whether a directory
+func (fi bindataFileInfo) IsDir() bool {
+	return fi.mode&os.ModeDir != 0
+}
+
+// Sys return file is sys mode
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _dataDataJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...") // gzip-compressed contents of data/data.json; the embedded byte literal is truncated in this excerpt
7\x4d\x15\xa2\xe7\xcb\xc1\xc9\xc9\xf1\x97\xa3\xeb\xe1\x87\x96\x2d\x59\xb4\x95\x27\xf5\xaf\x98\xd6\xd2\xb3\xeb\xaf\x07\x17\xff\xeb\x09\xdb\x9a\xba\x61\x34\x42\xdc\x36\x48\x38\x0d\x07\x6b\x80\xc9\xca\xba\xb3\x59\xf6\x6d\xae\x07\x66\x2b\x48\x68\x67\x3c\x31\x7b\x71\x52\x20\xd2\x59\xbb\x1b\x9c\x6f\x1e\xe2\x6f\x5e\xf2\x55\x2a\x66\x43\x31\xcc\x4f\xfd\xea\x56\xc1\xba\xf3\x05\xcd\x1b\xfc\x3c\xe5\xe8\xde\x70\x68\x58\x22\x2f\x11\x50\x12\xcd\x43\x49\x04\xd4\x3b\x40\xbd\x03\xd4\x3b\x40\xbd\x03\xd4\x3b\x40\xbd\x03\xd4\x3b\x40\xbd\xb3\x32\xea\x9d\x02\x1d\xc5\x0e\xd0\x51\x00\x1d\x05\xd0\x51\x20\xa0\xa3\x00\x3a\x0a\xa0\xa3\x58\x20\x1d\x85\xeb\xe3\xc9\x8d\xc7\xec\xbb\xe5\x52\x54\x94\xa6\x06\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x0a\xe0\xad\x00\xde\x8a\x42\x59\xc0\x5b\x01\xbc\x15\xc0\x5b\x01\xbc\x15\xc0\x5b\x01\xbc\x15\xc0\x5b\x01\xbc\x15\x2b\xe1\xad\x98\x76\xe2\xac\x52\x4d\x65\xb7\xc8\x7d\x12\x20\xbb\x00\xb2\x0b\x20\xbb\x00\xb2\x0b\x20\xbb\x00\xb2\x8b\x97\x45\x76\x71\x98\x18\x1d\xe5\x58\x20\x3c\x1a\x05\x64\xa4\x55\xeb\x78\xe9\x09\xf1\x2f\xf6\x9d\x15\xcd\xf8\x76\x71\x7c\x7d\x76\x7a\x54\x1f\x29\x5f\x1e\x94\x0e\x5c\x1b\xc0\xb5\x91\xb4\x00\xb8\x36\x80\x6b\x03\xb8\x36\xb2\x6d\x02\xae\x0d\xe0\xda\x00\xae\x0d\xe0\xda\xc8\xf6\x16\xb8\x36\x80\x6b\x03\xb8\x36\x80\x6b\x03\xb8\x36\xb2\x3f\xe0\xda\xc8\xb7\x02\xb8\x36\x80\x6b\x43\xff\x80\x6b\x03\xb8\x36\xb2\x5c\x1b\xda\x25\xf1\x42\x16\x16\x1b\x37\x7b\xf1\x19\x3f\xef\xdd\x39\x8e\x65\x07\x0e\xd7\x37\x94\x03\x35\x07\x50\x73\x00\x35\x07\x50\x73\x00\x35\x07\x50\x73\x00\x35\x07\x50\x73\x00\x35\xc7\xd5\xf3\xa7\xe6\x58\xe9\xa0\xb5\x99\x6a\xcb\x19\xb0\xc2\x3c\xab\x99\x61\xe6\xdc\x2a\x30\x98\xec\x2a\x06\x93\x69\x2a\xb2\x76\x08\xd0\x41\xf7\x4a\x4f\x5e\xbb\x20\x9f\x07\x7f\x8c\x45\x23\x1d\x75\x6a\xa2\x0e\x35\x43\xf1\xf4\x5a\x7b\xca\x5d\xab\xc2\xcd\xe3\x96\x97\x12\x1c\x04\xa1\x41\x4b\x0c\x0d\x1a\x40\x68\xd0\x62\x43\x83\x1a\x07\x07\xad\x22\x3c\xa8\xa4\xca\xbb\x3d\x7e\x8d\xa3\x70\x7c\x1d\xb2\x3b\x42\xe3\x3e\x5f\x1c\x9f\x7f\x1f\x1e\x1e\x1f\x1c\x1e\x9e\x7e\x3b\xb9\xbc\xbe\x3c\xfd\x7c\x7c\x22\xfa\x5c\xd7\x90\xda\xf8\x22\x88\x2e\x5a\xd3\xe8\x22\x30\x3a\x81\xd1\x09\x8c\x4e\x60\x74\x02\xa3\x13\x18\x9d\xc0\xe8\x04\x46\x27\x30\x3a\x81\xd1\xe9\xd5\x1a\x9d\xa6\x05\x2b\x4e\x53\x95\x15\x4f\x97\x54\x93\xd7\x84\x4b\xb7\x01\xcf\x1b\x70\xe9\x16\x2b\x01\x2e\xdd\xe4\x4b\x01\x97\x6e\xe1\x5b\x02\x97\x6e\xae\xcd\xc0\xa5\x0b\x5c\xba\xc0\xa5\x0b\x5c\xba\x6b\xc6\xa5\xbb\x86\x24\xaf\xb3\xf0\xbb\xb6\x10\x3b\x8b\x50\xb5\xe6\x63\x74\x9d\x5b\xd9\x79\x56\x54\xae\x53\x3e\xd7\x1a\xf0\xb7\x2e\x9a\x58\xb9\xf9\xc8\xad\x3f\x75\xf5\x53\xd0\xb9\x36\xf4\x9f\x90\x82\x28\xe3\x3d\x51\x45\x3c\x27\x56\x46\x3d\xe5\xdc\x13\x71\xcd\x61\xdf\xe7\xd5\x3b\x5c\x20\x98\x8b\xdb\x07\x04\x73\x40\x30\x67\x64\x05\x82\xb9\x67\x43\x30\x77\xa9\x9e\x13\xe5\x46\xa4\x08\x1e\x10\x67\xf1\x4c\x4f\x58\x26\xe2\xb9\x25\xed\x33\x38\x54\xc1\x29\xee\x84\x54\xb0\xb7\xbd\x02\xd6\xb9\xa4\x5e\xe5\xee\x63\x09\x89\x9f\x5f\x71\x8c\x86\x01\xf3\x7c\x0f\xe7\xf9\x0a\xd2\x42\xaf\x3a\xba\xd8\xcc\xfb\xb8\x45\x57\x1d\x63\x2c\x3b\x2d\xaa\x26\xa1\xed\xcc\x5e\x65\x3c\x0c\xc6\xc4\x02\x8a\xbd\xb9\x28\xf6\x62\xfc\x3f\xf4\x30\xe7\x27\x86\xf6\x24\xd5\x20\x2b\x9e\xed\x71\xf2\x59\x19\xf9\x80\x8a\x0f\xa8\xf8\x80\x8a\x0f\xa8\xf8\x4a\x8b\x2b\xc4\xc0\xbe\x41\x07\x8e\x23\x84\xed\x07\x8f\xfc\x42\xdf\x65\x07\xd0\x51\xe0\x0a\x15\x53\x2a\xa6\x6a\xa1\x89\x14\x3e\x09\x04\xf0\xa1\x6f\xd4\xfd\x85\x8e\xd8\x04\xbb\x14\x5d\x30\xfb\x8e\xc8\xe3\x43\xec\x79\xec\x27\x3a\x72\xef\xc4\xee\x2a\x83\x3c\xd2\xfd\x3d\x76\xfd\x
26\x4a\xff\x56\x2c\x49\x4c\xd5\x42\x90\x3a\x7a\x46\x17\x0f\xd4\x46\x07\x67\x65\x21\xc1\xb7\x1e\xf9\x75\xcf\x3c\xcb\x91\x4d\x2b\x87\x21\xd1\x85\xef\xac\x48\x29\x51\x13\x57\x5f\x56\xb8\x55\x24\x4a\x29\x0c\x77\xb6\x19\xf5\x8c\x86\x99\xad\x2b\x70\x19\x66\x5b\x03\x5c\x86\xed\x6a\x03\x2e\x43\xe0\x32\x7c\x76\x5c\x86\x4b\xe0\xf5\x03\x72\x3e\x20\xe7\x03\x72\x3e\x20\xe7\x33\xb3\x01\x39\x5f\xf2\x03\x72\xbe\xf5\x27\xe7\x43\x06\x46\x28\x85\x9e\x3f\x50\x3b\x9f\xbd\xac\xe7\x42\xdd\xc2\x23\x42\xc3\x1a\xa3\x10\x30\xfd\x01\xd3\x1f\x30\xfd\x01\xd3\xdf\x93\x31\xfd\x51\x5c\xc6\x26\x07\x1c\x7f\xc0\xf1\xf7\x8c\x39\xfe\x8a\xbd\xd0\xfe\xa3\xd5\xc6\xcf\x92\x29\x59\x2e\xcd\x2b\x1a\xa2\xbe\xdd\x91\x1b\xc8\x93\xde\x87\xe2\x07\x44\xc5\x39\x5b\xaa\x00\xe4\xc7\xbd\x68\xd1\x2d\xfb\xf0\xb5\x96\xcf\x05\x34\x39\xe2\x72\x39\x90\x5f\xc4\x36\x4e\xdd\xe4\x3f\x3d\x12\x5a\xda\x65\xa7\xa7\xb0\xa4\x27\x93\x25\xbd\xfb\x7f\x91\xc3\x4b\x63\x8a\x16\xcb\xbb\x58\x20\x81\x79\x0b\x24\x30\x40\x02\x03\x24\x30\x40\x02\x03\x24\x30\x65\x35\x03\x09\x8c\xf9\x0a\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x80\x04\x06\x48\x60\x4a\x3a\x01\x24\x30\xe9\x3b\x20\x81\x99\x42\x02\x23\xfb\x26\xda\xaa\x63\x53\x2b\xd7\x08\xb0\xc3\x00\x3b\x0c\xb0\xc3\x00\x3b\x0c\xb0\xc3\x00\x3b\x0c\xb0\xc3\x3c\xbb\xb1\x05\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x01\x76\x18\x60\x87\x29\x0e\x3d\xb0\xc3\xb4\xab\x0d\xd8\x61\x80\x1d\x06\xd8\x61\x80\x1d\x06\xd8\x61\xca\x2a\x06\x76\x98\x8c\x75\x0e\xd8\x61\x80\x1d\x06\xd8\x61\x80\x1d\x06\xd8\x61\xd4\x0f\xd8\x61\xa6\xb6\x01\xd8\x61\x80\x1d\x06\xd8\x61\x80\x1d\x46\xfe\x80\x1d\x06\xd8\x61\x5e\x21\x3b\xcc\x9e\x22\x87\x99\x76\xe0\x99\xf8\xde\x9e\x33\x09\x4c\x73\xf8\x53\xea\x28\xa9\x4a\x8f\xb2\x99\x7d\x2a\x1b\x46\xb7\xa0\x85\xf9\x45\x4e\x75\xd0\x4f\x7d\x97\x17\xeb\x1c\x5a\xdb\x05\x94\x8d\x0d\x68\x55\x73\x5d\xe4\xce\x42\xc2\x83\x96\xec\xed\x5a\x33\x3e\x91\xe1\xb5\x55\xfb\xc9\x97\x10\xed\x32\xcb\xcc\x7b\x01\x51\x19\x53\x22\x30\x2a\x5f\xcc\x12\x51\x51\x11\xce\x31\xcd\x39\xbd\xfd\x84\x49\x30\x3a\x75\x65\x0f\x14\x26\x3e\x67\x27\xf6\x6a\xd0\x9d\xe2\xc9\x9e\xf6\x3f\xb5\x38\xce\xed\xd0\xde\xb4\xd0\xf9\xfc\xda\x5b\xd5\xd2\xd8\xbd\xfd\xc9\x1d\xdc\xeb\xe4\xa7\x10\xea\xeb\xe4\xeb\x5e\x36\xa6\x0a\xed\x5a\x0c\x69\xcb\x81\x6b\xe9\xe4\x2e\xf5\xfe\x58\x31\x59\x82\x97\xfb\x1a\x38\xb8\x2b\xf5\x4f\x8c\xbd\x1e\x67\x74\xbf\xd5\x1d\x74\x07\xe2\x79\x0c\x73\x0e\xb3\x79\x51\x5e\xf5\x44\xc2\x5e\x40\x3c\x82\x39\xe1\x6f\xd2\x6c\x79\xff\x77\x19\xee\xad\x4e\xd3\x6f\x99\xe7\xb1\x9f\x2e\x1d\x21\x9b\x4d\x7c\x46\x09\x0d\xe3\xf0\x75\x31\x44\x6f\xc4\x5e\x4d\xb9\xd8\x8b\x76\xee\xa7\x65\x26\xcf\x6d\xea\x66\x1f\x67\x70\x77\xff\xbe\xdf\x7d\x27\x5e\x25\x9e\xfb\x39\x8e\x43\x1b\x53\x74\x43\x9e\x37\xcf\xe1\x8b\xe0\x2b\xbc\x5a\x37\xc6\xc2\xab\x55\x10\x0d\xf6\x17\x44\x34\x68\x26\x58\x07\x3e\xc0\x86\x6c\x80\x0b\xe6\x02\x9c\xc6\x04\xb8\x4a\x1e\x40\x51\x91\xef\x5e\x07\x8c\xe9\xde\xc5\xe0\x9a\x19\x5a\x5d\xf3\xf5\xa7\xd3\x8b\xcb\xeb\xeb\xfd\xd2\x77\x67\xa7\xe7\x97\x85\x91\x07\xae\xc1\x35\xe1\x1a\xdc\x30\x33\x7c\x3f\x19\x6a\xfa\xcf\x5c\xea\xef\x27\xc3\xdf\xbf\x33\x49\xcf\x58\x10\x96\xa7\x15\x6f\xd2\x03\xa9\x02\x87\xe1\xd4\x88\xb4\x44\xdc\x4e\x89\x4b\xd3\xaa\xdb\x13\x85\xa7\xa5\x9c\x3c\xd3\xd5\xe4\x3a\x41\x0b\x91\x6a\xab\x88\x54\x83\xe8\x2d\x88\xde\xaa\x88\xde\x9a\x1a\x58\x03\xe1\x5d\x10\xde\x35\x47\x78\xd7\x2b\x0a\xc4\xaa\x77\xd7\x2f\x91\xeb\xe0\xb4\x9f\x6d\xcd\x02\x9c\xf6\xc1\xff\x74\xe9\xfe\xa7\
xaf\xd0\xfd\x3e\xf9\xc4\xae\x47\x92\xd1\xe6\x4c\xaa\xb2\x76\xe8\x89\x47\x5c\x6a\xd5\x55\x7e\xd5\x8b\x73\x37\x7f\xee\x91\x00\xa9\xcb\xf9\xf7\x5d\xc4\x23\x5f\xec\x32\x45\x63\xa5\x8b\xfc\xb2\xbc\xce\x57\x1b\x2b\xb3\xdc\x60\x87\x27\x0b\xaa\x79\xbe\x81\x0c\xe0\xf1\xde\xd2\xe3\xfd\xa5\xb9\xb4\xaf\x93\xcf\xfa\x4b\x71\x23\x9f\x42\x07\x90\x9a\x9f\xd6\x9f\x15\x60\x86\xf8\x7e\x79\xea\x06\x01\xfe\xc5\x46\x3f\x79\x90\xfe\x5a\x06\xcb\x43\x90\x05\x04\x59\xcc\xdf\x06\x08\xb2\x78\xe2\x20\x0b\x55\x76\x10\xe5\xb0\x24\x17\x7b\x51\x96\x67\xbe\x88\x09\xd4\x20\x0e\xa0\xd0\x8c\x1a\xbf\xff\x0a\xa5\xa0\x3e\x30\x23\x31\x70\x41\x78\x06\x2b\x8c\xd0\xcb\x88\x5b\xa8\x0d\xf6\xc8\xcd\xec\xa9\x71\x27\xc5\x12\x16\x18\x2e\xb2\xea\xa8\x18\xe9\xb7\xa5\x29\xc1\x3d\x4f\xa9\xc8\x15\xb7\x92\xc4\x26\x03\x21\xab\x0d\x6f\x7f\x65\xc6\xb5\x24\x6d\xf5\x44\xad\xa2\xe5\x5e\x87\x12\x0f\xfa\x8b\xbe\xcc\x0e\xae\xb3\x83\xeb\xec\xd6\xef\x3a\x3b\xb8\xa2\x0d\xae\x68\xcb\x0d\xe4\x13\x5e\xd1\x06\xd7\x8c\xc1\xbd\x76\x2b\xbd\xd7\xee\x05\xdf\xe6\xb9\x9c\x08\x53\x16\x10\x87\x72\xeb\x7e\xd0\x1d\xec\xaa\x20\x53\x59\xb5\xbe\xef\xbc\x26\xd6\x74\xf6\xa6\xa9\x2a\xeb\x1a\x37\xef\x85\x08\xb9\x7a\xb3\x7e\x82\x19\xf7\x8e\x1b\xc6\x42\x1e\x06\xca\x97\x55\xd5\x63\xe9\x23\x2b\x7e\x95\x8f\xb8\x48\x5a\x1e\x07\x61\xe5\xe3\x98\xd2\x18\xa6\xec\xb9\x4f\x2e\xc0\x32\x1b\xc1\x99\x46\x65\xe5\x83\x63\xd3\x00\xa7\x34\x92\x2a\x89\xa2\x6a\x5e\x77\x12\x9f\x65\x96\x27\xe3\xbd\x96\x73\xf1\x44\xd1\x91\xb0\xac\xd8\xcc\x67\xc0\x51\xc8\x94\xcb\x65\xf6\x00\x6e\xf1\x5f\x6e\xa1\xb1\x41\xf9\xd2\xdb\xc4\x08\x4d\x5f\x05\x62\x0d\xc6\xae\x77\x85\x2f\x65\x7c\x8f\x34\x8c\x65\xb6\xd5\x96\xe4\x38\x64\x01\xb9\x75\x3d\x92\xf8\x54\x77\xf7\x77\xb6\x4c\xc7\x68\x12\x04\x2c\x30\x2c\x2a\x63\x82\xbd\xd0\x70\x16\x0a\x08\x76\x0c\x07\xb8\xf4\x83\xa1\xd4\x0b\x5b\x31\x15\xfc\xfe\x8d\x1e\x1f\x05\xc2\x74\xcf\x89\xc0\x54\x72\x38\x3c\x3a\xe7\xe8\xf7\xef\xc7\xc7\xb2\x47\xc4\xe3\x44\xfd\xeb\xaa\xe3\x52\x0b\x3b\x4e\xd0\xc5\x81\x8f\x91\xeb\xef\xca\x7f\x5c\x75\x74\x42\x39\x5e\x59\xff\x77\xe9\x52\xe8\x52\x69\x23\xce\x98\xe3\x6e\xb1\xe7\x85\xe3\x80\x45\xa3\x31\x2a\x2f\x35\x49\x6c\x58\xb0\xfd\x80\x4d\x48\x38\x26\x11\x47\xfb\xef\x06\x3b\x5b\x57\xf4\x2a\xd4\x70\xd9\xfd\xe6\xf3\x30\x20\x78\x22\x15\x18\x12\x88\x6e\x64\x5c\x2d\x6f\x59\xf0\x13\x07\x0e\xea\xa2\xc7\x47\xed\xa0\xe9\x26\x0e\x9a\x25\x99\x45\x9f\xdc\x5b\xf4\x0f\x57\x8e\x97\xf4\xbe\x7c\x7c\xec\x8a\xff\x8b\x83\xbe\x64\xd5\x7a\x78\xca\xea\xb9\xea\x48\xc3\x8c\xc0\x04\xef\x5e\x1e\x3b\xc9\xf0\x9a\xd0\x9c\x5c\x89\xb5\x04\xdb\x63\x82\xb6\x8c\x23\x52\x8f\x31\xdf\xfc\xbc\x1e\xc3\x8e\xf9\x16\x3b\x37\xd8\xc3\x34\x36\xc8\x96\x4d\xd4\xe4\x36\x12\xed\x14\x9e\x38\xdd\xcc\x3a\x5d\xab\xfc\xbe\x65\x22\x27\xde\x47\x64\xc1\x42\x15\x7e\xd5\x11\x33\xfc\xe8\xe4\x42\x0c\x41\xaa\x04\xac\x85\x93\x77\x1b\x2f\x76\x73\x24\x66\x77\x0e\xcf\x8e\x67\xa5\x7f\x38\xb1\x6d\x36\xf1\xbb\xf1\x09\x4c\xa9\x9b\xb8\xf2\x0e\x77\x98\x7d\x47\x82\x9e\x06\xe9\x32\x5f\xf1\x6a\x3e\x7a\xbd\x19\x34\x28\xe9\xa7\x6b\x28\x71\x03\x4b\xbc\x94\xe3\x59\x56\x5c\x05\xe5\x3e\xcb\x89\xeb\x6b\x89\xbf\x6f\x0b\x87\xd7\x39\xbc\x7e\x67\x73\x73\x2e\x75\x0e\x47\xe5\xce\xed\xfb\xc8\x73\x69\xf4\x2b\x4e\xb5\x1c\x17\x72\x3d\x95\xd6\x33\x26\xa0\xca\xef\x59\x05\xc5\x14\xb3\x55\x7f\x97\x12\x7f\x5f\xab\x88\x80\xea\x67\x1c\xab\x2a\xf4\x2a\xb8\x30\xc8\x14\x67\x91\xe7\xa9\x1d\xee\x3e\x1a\xde\x9e\xb0\xf0\x2c\x20\x3c\xc3\xc7\x53\xee\x8b\xe4\xb9\x13\xb7\x70\xdc\x3d\x21\x13\x16\x3c\xec\xa3\xc1\xdb\xfe\x57\x37\xff\x35\x4a\xfc\x96\xa4\xd7\xd2\xa0\x9f\xf5\x5a\x4a\x8b\xc9\x96\x82\x83\x11\x57\xa7\xbf\x96\x92\x47\x1b\xb1\x94\xd2\x7d\xef\xc5\x8a\x48\xf6\x98\xb7
\xe2\xf4\x2c\x1d\x38\x81\x00\x96\x4a\x65\xb6\x23\x7f\x0e\x56\x18\xe1\x6a\x1f\x1d\x9f\x05\xb9\xba\x92\x2f\x77\x26\x1d\x8e\x84\xf8\x4f\x8b\x51\x0d\xc9\x95\xed\x07\x2c\x64\x36\xf3\xf6\xd1\xb7\xa3\xb3\xf6\x45\x59\xa1\xed\x97\x17\x77\x79\x58\x57\x9c\x52\x4d\xf2\x05\x4e\x48\x18\xb8\x76\x45\xfb\x32\x05\x56\xfb\x9c\x95\xbb\x68\xe9\xf3\x8f\xbc\x4a\x88\x12\xdf\xac\xbd\xfe\x5e\xce\x4f\x8c\xdb\x63\x22\x1a\xf5\xe9\xf2\x32\xe3\xec\x58\xea\x64\xb6\x9b\xc9\x1c\xba\x13\xc2\xa2\x30\x79\xbb\x63\xbe\xe4\x91\x6d\x13\xce\x0d\xff\xb3\x41\x56\xed\xcb\xbb\xa7\xed\x64\x55\xd8\x0a\xe7\xb4\xda\x7e\xe7\x54\x5f\xa3\xdb\x83\xbd\x41\x93\x6e\xd7\xb9\x30\xc8\x9b\x0c\xce\x62\x2f\x86\x63\x6e\x63\x15\xca\x5e\xe2\xe6\x65\x46\x6b\xe6\x5a\x8a\x1d\xa7\xe0\xea\x70\x72\x7c\x79\xfd\xd7\xf0\xe4\x28\x8e\x49\xcd\xbe\x77\x02\x96\x3f\x39\xb3\x44\x63\xca\x96\xce\x39\x63\xe1\x07\xd7\x23\x7a\x4f\x94\x59\x48\x0e\xe5\x31\x34\x1d\x29\xe9\x1f\xbf\x29\x9e\x0b\x4f\x5d\xce\x53\xce\xf4\x8a\x8b\x1b\x21\x37\x24\x05\xe8\x8f\x03\x78\x62\xa8\xc9\xa1\xb7\xfa\xac\xe9\xdb\x9a\x0d\x98\xde\xe2\x95\xea\xb3\xa6\x12\x55\xad\xd0\x16\x55\xab\x74\x93\xa1\xf4\xa7\x40\x06\xc9\xc8\x35\xdd\x29\x4b\xc1\xed\x00\xfb\xb5\x9b\xe7\x26\x6a\x72\xac\x65\x69\x9d\x29\xef\x0c\xdb\x42\xa7\xce\xea\x1c\xa5\x95\xeb\xca\x86\x67\xfb\xe6\xee\xf0\xe4\xe2\x42\x6e\x7d\x94\x7c\x4b\xf1\xd7\xca\x83\xab\x9f\x01\xcd\x02\xc8\x5a\x65\x08\x5a\x95\x47\x03\x9f\x55\x06\x92\x7e\x0e\x4d\x73\xb9\x16\xb3\xdf\xb1\x70\x14\x32\xb1\xac\x95\x1a\xd1\x3a\xe4\xb5\xa4\x94\xd9\xe2\x5f\x4b\x9b\x33\x47\x80\x69\x69\x79\x89\xd6\xbf\x00\x1d\x3e\x53\x74\x51\x9d\x9f\x43\x01\x7e\xf5\x4a\x6a\xd9\x56\xa8\x76\x07\x53\x55\x50\xfd\xee\x65\x06\x1d\x39\x3f\x9d\x50\x99\x9a\x7c\x10\x85\xec\x42\x26\x2a\x28\xcc\xad\xbd\xf2\xb5\x86\x7b\xd5\xd9\xec\x4f\xf2\x2e\xbb\xc8\x50\x74\xaf\x3a\x03\xa1\xea\x1a\x29\x62\x6f\x47\x33\x8f\x85\x12\xa4\xf5\x03\x26\x00\xc6\x65\x14\x7b\x85\x55\x12\xa7\xb6\xac\x04\x10\xde\xe7\xf0\xc0\x4c\xa4\x04\xe3\x04\xfb\xef\xab\x96\x5d\x9a\x36\xc4\xc1\x88\x84\xef\x53\x74\x2a\xd3\x8a\x25\xeb\x0b\x8d\x81\x1a\xb9\x5c\x3b\x94\x7a\x22\xb3\x32\x2b\xff\x77\x79\x77\xd2\x84\x05\x44\x2e\x48\xfe\x3f\x84\x2e\x2f\xff\x75\x46\x82\x73\xe2\x7b\xae\x8d\xaf\x3a\x88\x8f\x59\xe4\x39\xc8\x61\x32\x1a\x32\xeb\xb5\x2c\xb9\x6d\x54\xc1\x7c\x82\x3d\x4f\x15\xbc\xa1\xe3\x4c\xa7\x95\xa3\x4c\x36\x5f\x5c\x4a\x70\x70\x90\x74\xf7\x0c\x07\x78\xc2\xb3\x7e\xa7\xa2\xdb\xda\xf6\x60\xf9\xf2\xfd\xfb\xc7\xab\x8e\x27\x73\x5e\x75\xf6\x1f\x1f\x2b\x4b\x29\x58\x76\x9a\x15\x58\x32\x10\xfb\x83\xcd\xbd\x8d\x92\x8e\xed\x6f\x6f\x5c\x75\x26\x2e\x15\x29\x36\xae\x3a\xbe\xba\x29\xec\xc2\xa5\x23\x8f\x9c\x31\x97\x86\x1f\x94\xce\x7a\xd5\xd9\x97\x5c\x1b\xc5\x60\xec\xb8\x41\x1e\x1b\x85\x8c\x87\x0e\x09\x82\xf7\x79\x5f\x57\xf1\xfe\xfe\xfd\x66\x13\xdc\x9d\xae\xee\x34\x38\xe2\x69\x28\xd5\x64\x5d\x8b\xbe\xfa\x3d\x6b\x9a\xcf\xb4\xa4\x15\x71\xa9\xe4\x9a\x94\x07\x28\xf1\x3b\x75\x8a\x22\x9e\x2b\x07\x7e\x31\x53\xd5\x99\x8c\x4a\xd1\xb0\xd0\x40\x7d\x7b\xd1\x15\x1d\xb2\xed\x91\x80\xf7\x64\x23\x4b\xea\x1a\x11\x5d\x95\x26\x3c\x2c\xaf\x2b\x3d\x4c\xbd\xea\x6c\x5c\x75\x84\x2e\x52\x51\x7d\x1a\xfc\x98\xd4\x29\x4a\xd7\xad\xe2\x24\x9c\xb7\x29\x15\xf5\x26\x20\x55\x36\xa0\x49\xc9\x9a\x00\x52\xa4\x28\x9f\x1d\x8b\xb9\x70\xbe\x66\x92\x18\x47\x39\x52\xb4\x56\x1c\xe6\xd4\xcf\xf8\xea\x39\x6f\x9e\x43\x35\x3e\x60\xca\x95\xde\xe8\xfc\x2a\x55\x89\x4a\x4e\x7c\xdf\xc2\x89\x2f\x9c\xf8\xc2\x89\x2f\x9c\xf8\xb6\x3c\xf1\xcd\x9e\xaf\x7a\x78\x42\x9c\xc8\xbe\x43\x3b\x46\xc2\x8c\xb6\x0d\xa7\xc2\x70\x2a\xbc\x66\xa7\xc2\xb1\x22\xa4\x6d\xc7\x70\x48\x0c\x87\xc4\x70\x48\x0c\x87\xc4\xab\xb4\xbf\x49\x19\x75\x40\x43\x77\xf1\x5d\xb
6\xd4\x92\x2e\xf9\xea\xf1\xaf\x79\x87\x35\x16\xd4\xf7\xac\x96\x18\x4f\x6c\xfd\x12\x20\xc9\x46\xdc\x23\xb9\xda\x7c\xe6\xb1\xd1\xc3\x67\x59\x5b\x66\x50\xe3\x1b\xe1\xaf\xa6\xdb\x0c\xe1\x5c\x1d\xce\xd5\xe1\x5c\x1d\xce\xd5\xcd\x3c\x70\xae\x6e\xf6\x10\xce\xd5\x8b\x29\xe0\x5c\x1d\xce\xd5\xe1\x5c\x1d\xce\xd5\xe1\x5c\x1d\xce\xd5\xe1\x5c\xfd\x15\x9f\xab\x8b\xa2\x15\x1e\xbf\xaf\x47\x02\x79\xea\xbe\x51\x0a\x84\xef\x33\x38\x88\xe0\xc4\x1e\x4e\xec\xe1\xc4\x1e\x4e\xec\x5b\x9c\xd8\xef\x3d\x9b\x03\xfb\x82\x76\xda\x6a\xc7\x23\x37\xcd\x8c\x4e\x30\xc5\x23\x12\xe4\x80\x72\xc2\x1c\xb2\x8f\xce\x89\xcd\xa8\x5d\xbe\x47\x7c\x32\xe7\x80\x76\xed\x06\x67\x82\xf5\x75\x26\x68\xf0\xa5\x8f\x29\x8f\x02\x92\x2a\xd7\xe0\x7f\x30\x33\x78\xb4\x1c\xeb\xc5\x45\xac\x3f\x77\x67\x84\x48\x9f\xfe\x5f\x65\xec\xac\x8d\x1d\x14\x10\x0a\x43\x2f\x73\x76\x0f\x2e\x0b\x6b\xed\xb2\x30\x45\xae\xb6\x93\x4e\x10\x25\x0f\x0e\x10\xd3\x2d\x81\xe0\x00\x01\x0e\x10\x10\x25\x0f\xa7\xf9\x70\x9a\x0f\xa7\xf9\xf1\xcb\x59\x4f\xf3\xe7\xef\x37\x1c\xe7\xc7\x15\xc0\x71\xfe\x33\x3d\xce\x5f\xa2\x12\x0e\xc7\xff\x70\xfc\x0f\xc7\xff\xeb\xad\xd5\xc2\xf1\xbf\xf1\x83\xe3\x7f\x38\xfe\x5f\xcd\xf1\xff\xcb\x8a\x99\xaf\x3d\xdb\x58\xa0\x7e\x02\x27\xfd\x4b\x39\xe9\x87\x03\x7e\x38\xe0\x4f\x0f\xf8\xe3\xcb\x8b\xee\x07\xdd\xc1\x8e\x3a\xe1\x6f\x88\x36\x73\xac\x4c\xcd\xb1\x5f\xf2\x09\x92\xab\xfd\xaa\x16\xe3\x9f\xe9\x54\xfe\xb3\x64\x46\xfd\xe9\x33\x27\xde\xa6\xc7\x17\x2b\xfc\x99\x9b\x58\x7f\x46\x9c\xe4\xf3\xca\xe3\x22\x99\x9f\xfb\xdd\xf8\xb2\xa8\x88\xa6\xb7\xf7\xfd\x59\x98\xd5\xa9\x06\x93\xde\x8a\x5c\xd4\x42\xd2\x23\x72\xf3\x10\x5b\xbd\x19\x91\x70\xd6\x42\x93\x83\xf1\x62\xa9\xf1\x71\xbb\xfa\x4b\x1f\xb9\xcf\x51\x4b\x8f\x87\x38\x8c\x2a\x2a\xf3\xf5\x89\xfe\x62\x56\x65\x93\x79\xb1\xd0\x23\xed\xa4\xd8\x16\x67\xd9\xc6\xed\x93\x8d\x19\x25\x5a\x49\xe3\x06\x15\x24\xca\x44\xa1\xa6\x74\x79\xe4\xef\xb2\x38\x93\x57\x11\xcb\x95\x11\xdf\x46\x52\x52\x79\xd5\xfc\x2f\xb7\x81\x34\x3a\x5b\x92\xa6\x31\xe2\x9c\x05\xec\xd6\xf5\xe2\x95\x96\x3d\x6a\x6a\x51\x9a\xce\x61\x94\x56\x5e\x18\xf6\x7d\x1c\x4c\x58\x90\x96\x56\xb2\x43\x2c\x6d\x5a\x10\xd1\xd0\x9d\x90\x19\x8a\x2b\x6b\x5b\xa1\xb4\x64\x17\x6f\x5e\x0d\x9a\x18\x09\xb3\xd6\x36\x2b\x35\xa5\xc5\x0f\x38\xb1\x83\xf8\x4a\x14\x0b\x91\x89\x1f\x3e\x1c\xc5\x57\xe5\x59\xc9\x05\x71\xf2\x73\xa9\xce\x7d\xd2\x8f\xd2\x32\x7d\x1c\x8e\xcf\x02\x72\xeb\xfe\xda\x4f\xec\xfa\xc9\xe5\x79\x9d\x9a\x54\x99\xdb\x57\xab\x12\x06\x51\x72\xa3\x6a\xec\xd4\x53\x6e\x7e\x4c\xfa\x2c\xaf\xe9\x0b\xb8\xbc\x7e\x59\x5e\x82\x22\xc1\x26\x88\xe8\x01\x17\x2f\x62\x4f\x9c\xc8\x13\xfa\xa1\x78\x7a\x40\x65\x58\x16\x8f\x7c\xdf\x23\x42\x69\xc1\x5e\x06\xd9\x8a\x49\x6f\xb9\x42\x8a\xaa\xf7\x6f\x50\x62\xbb\x45\xa9\xf1\x36\x19\xc5\x7a\xc3\xae\xfe\xb4\x07\xd3\x53\xbe\x41\x87\x86\xf1\xd7\xf8\x48\xe6\x63\x21\x86\x4e\x8e\x2f\xaf\x0f\x8e\xbe\x0e\x4f\x94\xdc\x89\x6b\x70\xf2\x09\xff\xad\x06\x58\x9b\x2a\x02\xe6\x97\xbd\x7f\x23\x6f\x86\xc9\x79\x68\xc9\x99\x32\x3c\x32\x1a\x27\x9e\x0c\xcf\x0e\x73\x4f\xf4\x35\x3c\xa9\xb5\x58\x66\x34\x8c\x78\x13\x97\xee\x23\x6d\x3f\x9f\xe0\x5f\xfb\x68\x77\x67\x67\x6b\x47\xd5\x7b\x71\xfc\x25\xb6\xbf\x70\x22\xff\xa9\xbf\x40\xf2\x4a\x6e\x41\x29\x27\x0e\x72\x29\x3a\xc4\xf8\xe2\xcc\xfc\x44\x7f\xc6\xdf\xe8\xcf\xac\x8c\x49\x17\x45\x1e\x65\xab\x0c\xbc\xb9\x4b\x1a\x1b\xee\x6d\x42\x97\x04\xca\xfe\x92\xa0\x80\x81\xcf\x49\x3d\x36\x75\xe5\x56\xbd\xfb\x7f\xb8\x68\x87\xf6\x07\x4a\xbc\x6c\xae\x3a\x32\x4e\xb8\x23\x96\x87\x7d\x13\xf4\xaf\x3a\x1b\xe9\x2b\x9b\xc6\x1d\xb8\xea\xec\x5f\x75\xfa\xdd\xad\xee\x20\x93\xc0\xf7\xa2\x91\x2b\x14\x77\xa1\x13\x27\xbb\xc3\x8c\x0b\xcf\x55\x27\x7c\xf0\x75\x05\xc9\xd2\xdb\xc8\xa6\x70\x
88\x47\x46\x52\x8b\xde\xcf\x66\x16\x2f\xc7\xd8\x0d\x7c\x97\x7e\x15\x1d\xed\xa8\x4f\xbd\x91\x4f\xe3\x72\x7d\x30\xf0\x11\x87\xe4\x27\x7e\x88\x13\x9a\xe9\x8c\xed\xed\xef\x8d\x06\x8d\xf5\x59\x10\x4e\xb0\x5f\x68\xac\x79\x44\x52\xda\x60\x91\xf1\xab\x72\xa0\xe3\xd3\x1a\x12\xff\x53\xab\x7c\xf2\x6f\x4a\xc2\xfa\x2f\xa6\xa7\xbd\x6a\x66\x6a\xcc\x3e\x1c\x1e\x9d\xff\xfe\x9d\xf9\x3e\x7f\xe9\xeb\xac\xb3\xcd\xbc\xea\x5c\x26\xbd\x7c\x7c\xec\xea\x0b\xa9\x75\xda\xae\x78\x97\x29\x46\x64\xf8\x7e\x32\x94\x85\x14\x52\x7f\x3f\x19\xfe\xfe\x9d\x49\x2a\xd6\x5f\x79\x5a\xf1\x26\xb5\x32\xfc\x4e\xba\xdc\x42\x51\x38\xc2\x64\xc2\xa8\xbc\x77\x6b\xca\x6a\x9a\x63\x25\x25\xf6\xe4\x54\x05\x8b\xe5\x62\x6b\xdb\x74\xbe\xec\xf2\xf2\x51\xde\x97\x65\x4d\xad\xc1\xa5\x86\xee\x62\x6a\xd3\x0a\x7b\xc2\xc2\x5a\x12\x84\xb2\x57\x72\xf3\xe0\x52\x87\xfd\x4c\x0a\x2f\x11\x07\xca\x56\x67\x7a\x9d\x4c\x31\xd4\x2f\xc7\x69\x25\x67\x5c\x2b\x37\x73\x57\x59\xa5\xe7\x39\xb2\xc8\x5d\x45\x9f\x6b\x47\x8d\xbd\xbb\xfc\x22\xfb\xd4\xe2\x5d\x30\x70\x17\xad\xce\x16\x2a\xde\x60\x9f\xbc\x94\x2e\x1c\x66\xda\xf4\x5a\xfb\xcc\xc3\xfc\xe5\xf6\xc6\xd8\x2b\x6f\xcc\xf8\x2a\x7d\x1a\x92\xe0\x16\xdb\x99\x36\xc9\x52\xc5\xc3\xf7\x29\x10\x95\x26\x2c\x18\x3f\xcb\xcd\xf5\x75\xbe\x2b\x57\x1d\xe9\xbd\x92\xb3\xd4\xa7\x56\xfa\x9d\x9c\x95\xbe\xc2\x6f\x66\xa6\xb2\xea\x0e\xfe\x4b\xb5\xf8\xa4\xb6\xea\xa3\x7e\x79\xd6\x8f\xfe\x75\xd5\x49\x54\xbd\x0c\xfd\x08\xa1\xf7\x25\x27\xee\xf1\xc5\xfc\x66\x51\x55\x37\xcb\x57\xde\xc5\x5f\x75\x0f\x7f\x75\x75\x17\x67\xb9\xdb\xf7\x17\x56\x67\xee\xee\xff\x29\x0e\x44\xb9\xbb\xba\x2b\x2f\xa8\x37\xee\x12\xaf\x4a\x5f\x71\x63\xf8\x94\x3b\xbd\x1b\x5d\x6b\x1f\x97\xa1\x6f\x4e\xb7\x32\xad\x30\xce\xb4\x4e\x86\x95\x8b\x5c\xcc\x8b\x9e\x91\xbf\xcb\xc7\x35\xb3\xe3\x0d\xba\x1c\x13\x74\x78\x32\x8c\xef\x0c\xd5\xbb\x46\xf3\xfa\x76\x46\x11\xc1\xf6\x18\x55\xdc\x74\x7f\x78\x32\xbc\x3e\x39\xbe\xfc\x71\x7a\xfe\xf9\xfa\xf0\xf4\xe4\xc3\xf0\x63\x93\xcf\x9d\x6c\x4e\x3f\x93\x87\xb2\xaf\x5e\xad\x7f\x9b\x3f\x29\xd5\x32\x2a\x74\x79\xfb\x44\xbb\xca\x67\xbf\x5a\xd1\x71\x1d\xd2\x41\x5b\xd9\xe0\x1b\xcf\xac\x9a\x99\x22\xa4\xde\xb4\xe9\x22\xd2\x58\xfa\x2e\xfc\xda\x72\x8c\xab\xef\x93\xf9\x52\xed\x60\x33\xff\xdd\xf4\xb9\x7e\xcd\x74\x8b\xfe\x5c\xf7\xdc\x57\x7f\xfc\x69\x63\x57\xdf\x56\x63\x20\xc5\xfb\x28\xe3\xf4\xfd\xdc\x1c\xc5\x37\xfb\xff\xcd\xd0\x51\x4a\x2c\xef\xbb\x60\x79\x07\xcb\x3b\x58\xde\xd7\xda\xf2\xae\x2e\xea\x06\xab\x3b\x58\xdd\xc1\xea\x0e\x56\x77\xb0\xba\x83\xd5\x1d\xac\xee\x60\x75\x9f\xe2\x4a\xfe\xc4\xa6\xf6\x36\x8e\xe4\x46\x43\xc0\x42\x0f\x16\xfa\x24\x31\x58\xe8\x51\x5c\x16\x58\xe8\xc1\x42\x0f\x16\x7a\xa3\x60\xb0\xd0\xab\x1f\x58\xe8\xc1\x42\x0f\x16\xfa\x35\xb6\xd0\xef\xcd\x67\xa0\x5f\x4c\x7c\xc2\x1a\x9a\x62\x17\x1d\xbe\x33\xe5\x94\x22\x05\x78\x38\x19\xd0\xa1\x1e\xf9\x49\xb9\x48\x23\x8e\xbe\x3a\x45\x7f\xf1\x62\x44\xfb\x82\x0d\x39\xf3\xd9\x71\xa6\x9a\x71\x1a\x59\x71\x0a\x36\x91\x5b\x16\xd8\xe4\xc0\x71\xc4\xb6\x50\x53\xf9\x36\x31\xe2\xcc\x65\xc3\x69\x6c\xc2\x99\x66\xc1\x01\x03\xce\x33\x70\x9b\x6c\xbb\x8a\xc0\x75\xd2\xfc\x3d\x77\xc3\xcc\xf3\xb4\xbf\x34\xb0\x94\x2c\xd2\x34\xb2\x78\xfe\x2b\x69\x25\xd8\xaa\xe4\xb3\xda\xe9\xf7\xbf\xe6\x27\x61\x25\x2b\xd6\x4e\x55\x29\xbb\xdb\x5f\xcd\xa1\x9e\x6e\x6e\xc9\xec\x0e\x0b\xa6\x9f\xce\xc6\x55\x27\x35\xf2\xa8\xbf\xf2\xd6\x1d\x9d\xa6\xc6\x64\x63\xee\x32\x63\xde\xca\x05\xb7\x21\x5f\x43\xd6\x32\xd4\xd4\xd6\x92\x15\x2e\x60\x2e\x01\x73\x09\x98\x4b\x5e\x85\xb9\xa4\xf4\x00\x1b\x55\x58\xfd\x35\xac\x8e\x08\x8a\x95\x45\xad\x6b\xa1\xab\xce\xfd\xa0\x3b\xd8\x94\xdc\xc5\xe9\x54\xac\xa6\xe0\xac\x66\xa4\x69\x96\x2b\x47\x7e\x53\xc2\x09\x3c\x85\x8f\x47\xf3\x35\xf8\x1e\xa6\
x0d\xc8\x70\xea\x1b\x5c\x5b\x11\x09\x6d\xa7\x5d\x05\x65\x7d\xcb\xb0\x2a\x19\x35\xe6\x2a\xa3\x2c\xb4\x72\x37\xe1\xcd\xc2\xfd\x03\x36\xb4\xd7\x65\x43\x5b\xba\x27\x9e\x36\xd2\x89\x67\xfa\x86\x8a\xd8\x8d\x76\x4e\x02\xb7\x98\xd1\x6e\x4e\x06\xb7\xd2\x62\x66\x3a\x79\xaf\x68\xd0\xfc\xc4\xd1\x55\x24\x6e\x2a\x39\x6c\xf2\x5a\x9e\xbe\x17\x9a\xb2\x38\xe2\xb6\xb2\xfd\x5a\xe5\x57\x5c\x5f\x9e\xb7\xcf\xd1\xcd\x2b\xe7\x79\xab\xf9\xa8\xa8\x92\xe8\x2d\x4f\x04\x8f\x80\xe9\x6d\x2d\x2f\x7a\x6b\x44\x06\x37\x9f\x60\x9d\x53\xb8\x2d\x8b\xeb\x6d\xf9\x91\x2f\x75\x8c\x5f\xa5\x83\xf2\x4a\xe8\xe0\xe0\xe2\xb7\xb9\x66\x49\x4b\x62\xb8\x4a\x78\x5f\x0c\x33\x5c\x45\xf1\x2d\xa9\xe1\x16\x85\x31\xcf\x01\x58\x16\xb4\x17\x98\x67\x03\xb0\xe4\x1e\x26\x3a\xfb\x1b\x14\x2f\x4b\xa1\x79\x86\x52\x99\x77\x6f\x5d\xe2\xa0\x31\x09\x88\x4e\x32\xe8\xa2\x21\x45\x2c\x70\x48\x80\x42\x86\x26\xf8\x8e\x20\x79\x55\x0a\xfa\xaa\xaa\x43\x0e\x93\xb9\x83\xb8\x06\x14\x8e\x5d\x9e\x14\x8d\xa4\xb0\x24\x21\x09\xba\xaa\xc0\xcd\x6e\x4c\x18\x2f\x74\x8c\x81\x7e\xba\xd5\x45\x3f\x5c\xcf\x43\x37\x04\x85\x11\x55\x71\x06\x01\xc1\x9e\xbc\x16\x40\x20\xfd\xd1\xc9\x05\x92\x93\x50\xa8\x8a\x9e\x54\x0b\x2d\x31\x61\x85\x26\xe1\x72\x14\x46\x81\xc8\xc5\xa8\x2c\x6f\x69\x17\x00\x55\x6e\x6c\x2f\xa2\x40\xa8\xa4\x03\xb1\xa5\x4d\x1e\x66\x76\xbb\xfd\xf5\xb8\xfc\x47\xeb\xec\x41\x69\x54\x57\x7c\x57\x8f\xa5\xae\xff\xc9\x5e\xf8\xf3\x3c\x0f\xae\x4a\x36\xa5\x3e\x73\x0e\x68\xe8\x96\xee\x4b\xfd\x80\xdc\x92\x60\xb6\x8d\xa9\x85\x7e\x12\x77\x34\x0e\xe5\xbd\x29\x39\xf3\x0c\x73\xe2\xea\xc4\x86\xb5\xb0\xe9\x90\x9f\xad\x84\xa4\x3c\xfe\x35\xd8\xcd\x26\x9b\x48\xfd\xc5\xcb\xf7\xa0\xe9\x0e\xac\x7c\x03\x9b\x6c\x61\x85\xdc\x4c\xa6\x8c\x69\x79\x57\xbf\x90\xf9\xcc\x63\xa3\x87\xcf\xb2\xc6\xcc\x24\x19\x33\x1e\x66\x4f\x53\xc0\x0a\x90\xa9\x6f\x75\x56\x80\xf2\x6d\x7d\xa3\x8b\xaf\xea\xae\xbd\x5a\xa1\x65\x20\x6f\x05\xb6\xf2\x7a\x91\x52\xfc\xcc\x4d\x78\x99\xdd\xb5\x28\x9c\xd3\x8a\xd5\x8e\x3c\x7b\xf0\x30\xe5\x2c\xbb\xe2\x36\x27\x6d\x8d\x68\x68\x83\x78\x83\x2e\x4f\x8f\x4e\x85\x82\x14\x6a\xa3\x82\x3e\xb9\x46\x3f\xc5\x46\xfc\x27\xf9\xf3\x9e\x20\x5f\x05\xa9\x3a\x28\x1c\x93\xb4\x55\xe8\x96\x05\x6a\x63\x9e\x2d\x50\x6b\x09\x7c\x43\x24\xa7\x88\x93\x30\xb6\x73\xa0\xf7\xaa\x70\x21\xc3\xef\x08\xf1\x95\x90\x4e\x0b\xcc\x1a\x99\xdf\xa0\x51\x84\x03\x4c\x43\x42\x1c\x64\x7b\x98\xf3\x2e\x3a\x8c\x82\x80\xd0\xd0\x7b\xd8\xc8\xe7\xbd\xc5\x9e\xc7\x91\x4b\x43\x26\xaa\xcd\x96\x73\xd5\xb9\x89\x02\x1e\x0a\x11\x78\xd5\x41\xb6\x90\xb0\xa2\xa3\x5c\x26\x95\x63\xe9\x91\x10\x39\x8c\x70\xfa\x67\x88\x6e\xb0\x7d\xc7\x6e\x6f\xd1\x6d\xc0\x26\x62\xd4\x42\x1c\x84\x52\xc0\x87\x19\x23\xc2\xea\x2f\xb8\x9a\xeb\xea\x24\x7b\x4c\xec\xbb\x5e\x61\xde\xa0\xe4\x66\x91\x41\xbf\xbf\xb3\x9d\x7d\xf1\x92\x2e\x52\x4a\xf2\x95\xf5\x7d\xaf\xbf\x37\x68\xd6\xf5\x37\xe8\x27\x41\x3e\x53\x47\xb9\x3e\x73\x90\x9c\x1e\x91\x2f\x17\x83\x98\x4d\x9f\xd3\xfb\x51\x27\x58\x1a\xb1\xb4\xb6\x8c\x30\x75\xb2\x25\x31\xea\x3d\x88\xe5\x11\xf9\x32\x67\xda\x44\x59\x2b\x52\x17\x81\x22\x46\x6d\xa1\xcd\xe2\xf0\x4f\x8e\x12\x4d\xae\x3b\xf5\x73\x6c\x35\xfc\x1a\x25\x01\x3d\x8e\xbc\xcd\xf5\x7d\xf1\x7a\xd7\x6e\x2e\x1d\xe5\x96\x18\xbf\xf7\x62\xea\x6c\x65\xdf\xe9\x5b\x96\x1c\x37\x78\xdf\xab\x84\xc9\xc4\x3a\x14\x3f\xa8\x70\xb7\x38\x3f\xfd\x7a\x7c\xf9\xe9\xf8\xdb\xc5\xf5\xd9\xe9\xf9\xa5\xd9\x31\xe3\xdc\xb9\xbf\xb3\x63\x8a\x8e\xa9\x57\xac\xe5\x1a\x6d\x5e\x8d\xe6\x31\x79\x15\xa4\x31\x4b\x1a\xde\xb5\x56\x53\x66\x68\xfb\x75\xe5\xd6\x5f\xba\xa6\x7a\x57\x28\xb7\xf9\xad\x6b\x53\xce\xdd\x2b\xbf\x50\xf6\xec\xbc\x22\x99\x71\xc5\x51\x36\x10\x2c\x15\x4a\x47\x27\x17\x5f\x31\xff\xbb\x20\x94\x16\x00\x69\x85\x5a\xd1\xba\x41\x5a\x71
\x09\x66\x16\x85\x85\x4c\x13\x6b\xe6\xb9\xfa\x0e\x47\x62\x91\x49\x3f\x98\x3d\x2e\x86\x23\x1e\x12\x8b\x62\x4a\x1f\x32\x19\xb4\x38\x3b\x52\x09\x72\xe6\x5a\xb1\x1c\x33\x7f\xdd\xe5\xd6\x34\xb6\xc7\xc4\xe2\xee\x7f\x88\x58\xf1\xfd\xec\x4b\x8f\x8d\xac\x5b\x6c\xbb\x9e\x1b\x3e\xbc\xcf\x16\x63\x29\x24\x7b\xdf\x2b\x22\x4a\x6f\xb0\xf9\xb6\xdb\xef\xf6\xbb\x83\x37\x7a\xed\xa4\x37\x2a\xe7\x6e\x8c\x8e\x5f\xa9\xdd\x5a\xf1\x6d\x45\x85\xd5\x75\xc4\x7b\xb5\x8a\x8b\x94\xcd\x72\xcc\x4b\xa3\x8b\xc5\x95\xe6\xd0\x37\x4b\x4f\xab\xdc\x9c\xb0\xb5\x70\xf5\x6c\x6e\x84\x7c\x83\x38\x21\xfb\x72\x7d\xf2\xfd\x5e\x6f\xe4\x86\xe3\xe8\xa6\x6b\xb3\x49\x2f\xdd\x28\x98\xff\x74\x39\x8f\x08\xef\x6d\xbe\xeb\xef\xec\x48\x29\xe9\x90\x10\xbb\x1e\x9f\xa6\x99\xce\xe6\xfc\xb8\x99\xd1\x98\x16\x03\x7b\x53\x97\x56\x5c\x1e\x77\x1d\x62\xe3\xba\x33\xc2\x0b\x95\x62\x51\x38\x58\x22\x00\xd6\x1e\xfb\x0a\xe0\x57\x89\x7e\x96\x2f\x46\xeb\xbd\xd6\x69\x37\x92\x95\xb6\x2f\x57\xda\x86\xb1\x6f\xd5\x67\x66\x5d\x7e\x6f\x77\x8b\x20\xb4\xb1\xb3\x71\x50\x56\xb0\xfe\xd6\x46\xc1\xb3\x97\xda\x48\x1d\xd9\x9e\x47\xc4\xb7\x5a\x45\xe5\xcb\x05\xa5\x3b\x92\x64\x79\x15\xaf\xd8\x14\x10\x70\xc4\xc4\x4e\x29\xe2\x24\x39\xaa\x3d\x3a\xb9\x48\xd4\xc3\xba\x73\xff\x25\xdf\x78\xf9\x4c\xef\xa6\xd4\xcb\x7f\xed\xef\xa6\xac\xf9\x76\x46\x8c\xd6\x8c\x5f\x4f\x67\x89\xe5\xff\x37\x9f\x87\x01\xc1\x13\x49\xb5\x25\xfb\x1b\x0b\xfa\xa8\xf8\x26\x09\xeb\xf9\xd7\xe3\xa3\xb6\xe7\xba\x89\x3d\xb7\xa4\xa4\xdf\xbf\xa5\xa5\xf8\x1f\xee\xef\xdf\x1b\x31\x8d\xc0\xe3\xa3\x1f\xb8\x34\xbc\x45\x57\x9d\xff\xf6\xf7\x55\x07\x75\xc5\x23\xf9\xe6\xdf\xd9\x83\xa6\xb8\x85\x17\x61\x74\xa3\x96\x7b\xdc\x32\x9e\x3e\x49\x03\x8d\x1e\xd1\x47\x12\x7e\x56\x28\x65\x66\x29\xe4\x2f\xdc\x73\x64\xb8\x89\xed\xd5\x7b\x89\x65\x83\x7a\x9e\x81\xab\x18\xf8\x79\xe5\x13\xbd\x5c\x0b\x2f\xf8\x79\xe9\x1f\xf8\x79\x81\x9f\xd7\x33\xf6\xf3\x02\x27\x2e\x70\xe2\x7a\x46\x4e\x5c\xe0\xbb\x05\xbe\x5b\xaf\xc0\x77\xab\x26\xf8\x1e\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xc0\x8d\xeb\xb9\x6f\xf2\xc1\x8d\x4b\xff\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xe2\x92\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xc0\x8d\x0b\xdc\xb8\xd2\x61\x03\x37\xae\xb5\x33\x6a\x17\x0b\x07\x37\x2e\x70\xe3\x9a\xc3\x8d\x4b\xaf\x7e\xd5\xe6\xf6\xc4\xfc\x2d\xaf\x43\xad\x3e\x93\x2c\xf9\xa0\xba\x69\x5a\xe1\xd9\xd7\xa7\x7a\xa2\x02\x4b\xf3\x87\xb3\x60\xd1\xd4\xfd\x15\x95\xb4\x20\xf2\xcf\xb6\xba\x6e\x32\xce\x3e\x80\xed\x46\xce\x92\xbd\x11\x5b\xff\xfa\xf6\xcc\x3c\x92\xd9\x21\x4c\xce\x14\x2d\xec\xbb\x46\x0b\x08\x0d\xf5\x91\x7d\xd2\x96\xa7\x1f\xd6\x52\xe7\x89\xea\x53\xef\x7c\x2b\x62\x6f\x88\xfc\xbd\x03\xe9\x9d\x03\x59\x99\x99\x5e\x94\x60\xdc\x6e\x60\x5e\x41\xc0\x53\xd0\x4c\xef\xb8\x4c\xef\x23\x30\xae\x53\x88\x6f\x40\x48\x6e\x3f\x28\x6b\x83\xe9\xd2\x50\xd6\x1a\xc3\xa3\xa1\x45\x45\x73\x0e\x70\xdd\xcc\xad\x1a\xe7\x65\x2c\xf2\x7c\x25\x4b\x99\x8d\xb5\x8e\x04\xd8\x77\x03\x32\x72\xe5\x31\x6e\xf5\x6a\x3f\x38\x1b\xd6\x69\x2d\x3a\x75\x57\xb7\x29\x19\x02\x43\x0b\x50\x6a\x87\x92\x15\x55\x1d\xa8\x53\x79\x46\x6a\xbc\xf3\x35\xc8\x09\x13\x0b\x69\xdd\x64\xa1\xeb\x4b\x3e\x6b\x72\x71\xe7\xfa\x97\x5f\x2e\xbe\x93\xc0\xbd\x7d\x48\x8f\x1d\x64\x59\x67\x81\xcb\x02\x37\x7c\xf8\xea\x52\x77\x12\x4d\x92\x93\x44\x5d\x5e\xfc\x5a\x3f\x9f\xcf\x01\x63\x0e\xe4\x68\x47\x6d\xd3\xbc\xa2\x2a\x25\xb2\x30\x23\xcd\x13\x67\xed\x36\xa7\xfd\xfb\x52\x97\x86\xc7\xc7\xcc\xab\x19\x0f\xdf\xf3\x55\x57\xf9\x13\xe4\x9c\x0e\x1a\x3b\x17\xc4\x6d\x9a\x76\xb0\x5f\x3d\x3d\xeb\x0e\xfd\xcb\xd2\x83\x4b\x76\xf6\xf7\x0c\x4e\x6b\x9f\xa7\xeb\x44\xd9\x5e\xb2\x7c\x02\x3e\x5f\xef\xf0\xf2\xf6\x1a\xe6\x21\xd4\xfd\xaa\x92\xa
8\x7d\x9d\x34\x0f\xa1\xda\x1b\x25\x0e\xbc\x9f\xf8\xc1\x98\x0f\xa5\xb7\x71\x56\xd5\x6b\x6c\x1f\x0c\xf6\xf1\x7e\x77\x2b\x43\x3e\x8e\x92\xfb\x38\x3d\x12\x5a\xb1\xe0\xb0\x42\xd3\x74\x67\xa6\x48\xbc\x56\xa4\xad\x94\x70\x6e\x85\x0f\x3e\xe1\xef\xe5\x6d\x0e\x14\x7b\xc3\xb3\xab\xe9\x66\x9d\x6a\x53\xac\x54\x8d\xde\x1b\xd3\x9b\x47\x93\x09\x0e\x1e\xae\xb1\xef\xee\xc7\xd6\xc8\x72\xeb\xcc\xff\xa7\x9b\xf8\x49\xa4\x52\x46\xef\xa8\xdf\xdf\xdc\xd5\x8f\xcf\xd4\x19\xd6\xe6\x4e\x5f\x3d\x8e\x38\xc9\x4a\x2e\x23\x4b\x3c\x0c\x39\xd3\x79\x99\x49\xd7\x5c\x1e\x7a\x75\x9c\xca\x73\xfe\xbc\xbd\x1a\x59\x56\xbc\x3a\xde\x27\x4b\xc3\x2c\xa6\xb9\xaf\xe3\x82\x45\x5f\xa9\x2d\xe3\x6b\xb6\xb8\x26\x26\x8d\x62\x0b\x4c\xa3\x85\xb2\x38\x6c\x6f\x97\x9b\x1c\xe4\xd2\x96\xb1\x01\x67\x69\x3a\xbd\x3f\xa6\x23\x97\xfe\x72\xe9\x48\xce\xb6\xfb\x41\x77\xb0\xa3\xf6\xc7\xe5\x63\x74\x92\x5e\x58\x51\x32\x4a\x71\x31\xb2\xcc\x39\x2e\x11\x93\xf9\xb5\x45\x39\x0a\x92\x2b\xfe\x8d\x21\xcf\xd5\x94\x1b\x74\x39\x60\xb9\x24\x89\x65\x64\xca\x8c\xaa\x02\xd9\xfc\x1c\x9a\xa5\x5f\xa1\xed\xc7\x36\xb2\xbc\x15\x67\x51\x43\x17\x39\x4d\xab\x90\xb2\xaa\x8d\xf9\xa3\x95\x52\xaa\x3e\x61\x5c\xa5\x6e\x11\xce\x6e\x33\x6a\xba\x3e\xaf\xd9\xa5\x41\x9b\xb4\x5d\x30\x90\xa9\xe7\xbb\x1c\xd0\xf0\x5f\x4f\xe5\x1f\x75\x7c\xe6\x52\x53\x24\x1a\xd7\xfd\xa1\xcc\x95\x82\xe2\x2f\x4e\xec\x80\x84\x2b\xba\x0d\xb0\xbc\x96\x39\x6e\x32\x34\xa7\x5c\x65\xc1\x33\xf5\x25\xbf\xc3\x8f\x9f\xeb\x7b\x5e\x5c\x3a\x8a\xb7\x6b\x35\xcd\xd3\xdf\xdc\x1c\x7d\x47\x5e\xbe\xc6\x2b\x87\x7c\x9e\x36\x57\x37\x45\xce\x8c\x7b\x42\xcb\x6b\x95\x73\x49\x45\x39\x18\x4f\xfc\xd5\x0e\x51\xed\x4d\x91\x3a\xba\x63\x4e\xd3\x5e\x83\xd5\x19\x18\x06\x8d\x72\x9c\x58\xfc\x92\xad\x59\x90\x45\x9b\xd5\x42\x97\x4f\xbe\x35\x71\x1a\x29\x6e\x93\x74\x6f\xe2\xe3\x23\x8e\x42\x26\x4a\x8c\xfa\xfd\x2d\x5b\xea\x0b\x2e\xa3\x96\xeb\xc8\x07\xc4\x52\xcf\x53\x9c\xc3\x9c\xab\x37\xe9\xf4\x78\x83\x3e\x91\x40\xaa\x22\xb9\xc4\x49\xb0\x91\xe5\x29\x1b\xa6\x59\xa4\x1a\xf9\x42\x51\x97\x63\x97\xa3\x31\x96\xad\xba\x21\x08\x3b\xd8\x0f\x89\x23\xd4\xe3\x07\x16\x21\x7b\x2c\xe5\x2d\x71\xc3\x31\x09\xd2\x28\x82\x34\xbb\xf4\x92\xf4\x70\x44\xed\xb1\x4b\x47\xd2\x8f\x2b\x87\xd4\x49\xa3\xba\xe6\x80\x56\xb6\x59\xcf\x90\x4e\x83\x85\x1d\x25\x77\x96\x2c\xe2\xb3\xe5\xab\x4a\xd7\xf2\x2c\xa5\xe7\x24\x48\x69\x3f\x96\x67\x62\x2f\x2e\x47\x8b\xba\x1c\x5b\x37\x71\x9e\xda\xb5\xb9\x18\x4b\x7b\x19\x24\xb4\x0c\xc1\x9a\xa6\x87\x2c\x5d\x13\x69\x3e\xc6\x86\x42\x92\x1b\xea\xc5\x5a\x87\x6b\xd4\xa0\x15\x0e\x6e\xdd\x9e\x2c\x6f\x96\xac\xbd\x27\xb5\x0a\x2b\xea\x67\x68\x9b\x6b\x75\xca\xb6\x13\xd5\xe6\xc3\x92\x2b\x97\x16\x6e\x44\x2c\x18\x0a\xcb\x9a\xa8\xdf\x94\x86\x07\xf9\x01\x9b\x90\x70\x4c\x22\x39\x7b\xd5\xee\xf1\x4f\xb1\x7f\xdf\xfe\xb3\x2a\x11\xb7\x03\xec\x93\x7d\xf4\xa7\xd8\xb6\x67\x23\x86\x74\x4d\x60\x82\x7c\x06\x26\x48\xf5\xbc\xf2\x42\x3d\x35\x6f\x8f\x4e\x2e\x94\x15\x2c\xb5\x92\x18\x4e\x2a\xca\xd9\x34\x9f\x20\x99\x9e\xcf\xd3\xc8\x39\x6d\x3b\xab\x52\x95\x99\x42\xa7\xa3\x58\xa6\x5e\xf5\xe0\x89\x2d\xa5\xda\xfc\x48\x09\xea\x1e\x78\xbe\x4b\x89\xb2\x77\x0a\x85\x22\x6d\xa3\x58\x76\x87\x65\x26\xd5\x32\x4b\x27\x1f\x9b\x7f\x59\x76\xe6\xdd\x03\xb7\x43\x0f\x59\x3f\x11\x25\x61\xd7\x66\x01\xe9\x72\x36\xc1\xbf\x6c\x46\xe9\xfb\xad\xcd\xb7\xbb\x7b\xff\x33\x97\xc6\xf5\xef\xb7\xbb\xae\x7f\x2d\xdd\xb5\xaf\x05\xbe\x5c\xcb\x29\xf0\xfe\xaa\x33\xe8\x6f\x6e\xa3\xdd\x9d\x9d\xad\x8c\xaf\x79\xea\xe6\x67\xf4\x67\x86\x0b\x81\x93\x03\x50\x3b\x34\xdc\xc4\x67\xb9\x80\xb6\x68\x8f\x2c\x31\x4f\xa7\x06\xea\x3a\x31\x54\xec\xe4\x50\xa5\x2b\xf4\xb2\xe0\xcd\xa7\xaa\xe8\x35\x29\x1d\x65\xa8\x2b\x6e\xd4\x3d\xe5\xf1\x7c\x7e\xff\x8f\xff\x9e\xb9\x
ca\xf6\x7f\xf4\xe2\x84\xe3\x30\xf4\xe3\xd4\xc5\xe2\x52\xfe\x91\x42\x01\x15\x56\xba\x6c\x7e\xd3\xe4\x55\x57\x58\xde\x34\x96\x2d\xc5\xb4\x6a\xd5\x95\x92\xb7\x7e\x65\x4b\x31\x44\xa1\xb4\xb4\xbb\xbf\xde\xcb\x3e\x74\xf5\xb8\x66\x51\xd7\xcc\x5f\x86\x60\xc7\xbf\xc2\x00\x1f\x04\xa3\xac\x29\x3a\xae\x2b\x86\x31\x09\x4b\x94\x88\x4c\x62\x71\x9a\xf6\xe9\x12\x36\x90\x02\xcc\xa0\xec\x51\x43\xf5\x5a\x9f\x36\xcb\x11\x32\x2f\xfa\x2f\x91\x50\x4e\xc0\xf2\x37\x55\xaa\xbe\x1c\x7c\xf9\x52\x7c\x8c\x1d\xa7\x34\xf1\xc9\xf1\xe5\xf5\x5f\xc3\x93\xa3\xeb\x8b\xe3\xf3\xef\xc3\xec\x8d\xc9\x08\x05\x11\x3d\xe0\xdf\x38\x09\xf6\xd1\xd6\x56\xbe\x8b\xc5\x8e\x67\x43\x59\x90\x19\xce\x52\x72\x09\x34\xaa\xb9\xa5\x17\xd5\x5d\xcc\x8c\x1a\x5d\x08\x5d\x56\x7d\xe1\x52\xe8\xa5\xb4\x41\xdb\xdb\x95\xd8\x95\xb3\xee\x98\xde\xf3\x5c\xec\x7b\xf2\x3c\x55\x2a\x07\x9b\x15\x9c\x33\x79\x47\xda\xb4\x63\x02\x0b\x72\xb3\x26\xeb\x62\xbb\xd7\xaf\xca\xc6\x6b\xf3\x25\x27\x13\xea\x57\xed\xab\x5d\xe6\xeb\xbc\x95\x4d\x50\xee\xce\x9d\x0b\x6c\xf9\x4f\xe1\xad\x76\x11\xde\xcc\xbb\x74\x57\x3b\x75\x57\xb8\x75\x0f\xf2\x31\xe1\x24\x70\x99\x53\xf9\x7a\x8a\x6b\x77\xd1\x31\x7c\x90\x55\x94\x2b\x63\xf5\x9e\xd7\x50\x2d\x61\x14\x8c\x19\xff\xdd\x88\x42\xc8\x4e\xe6\x5c\x7c\x42\x7e\x49\x98\x19\x2b\x97\x46\xa1\x22\xa3\x8e\x34\x70\xb9\xbc\x68\xa3\xd4\xbd\x4c\xa1\x73\xbb\xdb\x54\x09\xe8\xe2\x51\x57\x55\xca\x06\xdb\xdf\xd4\xcd\x66\xd0\x66\x37\x5c\x55\xe3\x6c\x1b\xd7\x6a\x55\x04\x36\x98\xd9\xdf\xb3\xd9\x60\x86\x24\x90\x64\x6d\x2e\xa3\x1f\x03\x6c\x93\xb3\xec\xf2\x4f\x43\x5f\x9e\xaf\x4b\x49\xbd\x02\xfc\x06\x1d\xd0\x07\xa5\xb1\x23\x97\x0b\x78\x9b\xb8\x9c\xe3\x1b\x8f\x20\xcc\x91\xc7\xe8\x08\x61\xf3\x43\x28\x86\x9d\x50\x85\x1c\x73\x84\xd1\x76\x7f\x1b\xf9\x22\x37\x0e\x51\xcf\x4c\xb7\x69\xa6\xdb\xec\xf7\x11\xa3\x08\x27\x60\x9c\x98\x7d\xcb\xf6\x46\x7a\xdb\xf0\x97\x6a\xec\xc2\xa2\x40\x73\x12\x20\x09\xe8\xce\x63\x78\x9b\xe0\xa7\xad\xa6\xc1\x4f\x53\xc3\x7a\xb2\x0d\x29\x8f\xd1\x29\x65\x12\xc8\xc7\xe1\xa8\x5f\x55\xdc\x4e\x3d\xbd\x40\x6d\x21\x33\x3a\x95\xb4\x87\xed\xe6\xe0\x9f\x00\x67\xd1\x4b\x24\x1e\x4d\xd3\x23\x24\x1e\xe3\x1c\xfe\xd7\x54\x50\xe1\x3e\xb2\x07\xde\x23\xe0\x3d\x02\xde\x23\xe0\x3d\xf2\x5c\xbd\x47\xc0\x3d\x04\xfc\x3f\xc0\xff\x03\xfc\x3f\x10\xf8\x7f\x80\xff\x07\x02\xff\x0f\xf0\xff\x00\xff\x0f\xf0\xff\x78\x69\xe6\x39\xf0\xff\x00\xff\x0f\xf0\xff\x48\x9e\x82\xff\x07\xf8\x7f\x80\xff\x07\xf8\x7f\x80\xff\x07\x02\xff\x0f\xf0\xff\x00\xff\x0f\xf0\xff\x00\xff\x0f\xd8\x60\x82\xff\x07\xf8\x7f\x80\xff\x47\x52\x30\xf8\x7f\xb4\xf7\xff\xf8\x49\xf0\x3d\x91\xbc\x21\xbb\xc6\x25\xc9\xfa\x4c\x28\x71\x3e\x40\x36\xa6\xe8\x86\xa0\x88\x13\x07\x85\x0c\xc5\x9b\x46\x82\xb0\xa8\xf2\xd6\x1a\x33\x1e\x12\x07\xfd\x10\xa5\xa1\x13\x12\x22\x97\xf2\x10\x7b\x9e\x32\x6b\x57\x0d\xee\x17\x79\xee\xea\x86\x64\x62\x1c\x04\x66\xd2\x89\x4e\x54\x5b\xb1\x2b\xc8\xb9\x54\x9f\x68\x7a\x4e\x54\xcd\xf5\x92\xad\x31\x91\xef\x69\xb5\x86\xed\xba\x4d\x8d\x05\x39\xd9\xba\x65\x39\xd9\xc9\x8b\x77\x52\x95\x09\xf7\x9a\xaa\x72\xa2\xbd\xb4\x37\xa8\xd2\xe0\x8c\x66\xbc\x93\xac\x59\x5e\x53\xc8\xa0\xf8\xc8\x32\x27\xcd\xfe\xf5\x78\xd5\xb9\x23\x0f\x57\x9d\xfd\xab\x8e\x43\x1c\xd7\xc6\x21\x71\xae\x3a\x1b\x57\x9d\x58\x76\xc8\x57\xc7\x7f\x47\xd8\x93\x8f\xa5\xac\x95\xcf\xd4\x2d\x24\xf2\xa1\x92\x41\xf2\xa9\x21\x86\x3a\xbf\x33\x17\x6b\x15\xbe\x5e\xcd\xb0\xe6\xbe\x13\x2a\xd7\x73\x50\x9d\xae\x83\xe6\xd5\x77\x50\x23\x9d\x07\x35\xd5\x7b\xd0\x0c\xba\x0f\x6a\xa6\xff\xa0\x69\x3a\x10\xca\xea\x41\x75\xb6\xea\x7c\xaf\x73\x25\xb6\xb2\x5a\x27\x59\x6a\x6c\xd7\x49\x9a\x0a\x0b\x36\xaa\x32\x31\x22\x43\x69\x91\xf3\x27\x5f\x6f\xd9\x7d\xd4\x71\xb6\xde\x98\x4d\x48\x4f\x66\xeb\xa9\
x43\xf6\xae\x69\xea\x55\xbf\xa2\xa5\xc5\xac\xf3\xd3\xe9\xc5\x65\x99\xb5\x05\x4d\xb1\x76\xa0\x69\x16\x0f\x24\x45\x4c\x11\xb1\x2b\x8a\x51\xb6\x11\xb1\x5c\xba\xe2\xbb\x9d\x14\xec\x33\x66\xa3\x87\x67\x07\x5f\xbe\x9c\x1e\x5e\x9f\x1f\x9c\x7c\xac\x6e\xb9\xfa\x52\x31\x2b\xf8\xe1\xf0\xe8\xbc\xf8\xc5\x50\x6a\x83\xeb\x4a\x01\x75\x86\x39\xff\xc9\x02\xa7\x38\x01\xd2\xea\x7f\x1c\x1f\x7c\x3f\xbe\x3e\x3b\xb8\xb8\xf8\x71\x7a\x7e\x34\xad\xfe\x7c\xb1\x15\x2d\x28\x4c\x97\x6c\xe3\xbe\x5e\x7e\x2b\x99\x94\x99\xc3\x02\x91\xa4\x5f\x9a\x28\xdb\xf0\xaf\x97\xdf\xa6\xb5\xf9\xeb\xe5\xb7\xda\x96\x56\x36\xa5\xfc\x9d\xa1\xe4\x16\x8d\xe2\x68\x9a\x25\x04\xd5\x18\x3b\x90\x3e\x0f\xdb\x47\xc9\x2d\x04\x65\x49\xb4\x46\x6c\x7a\x3f\xe5\x12\x48\x4d\x69\xf7\xed\x5e\xc1\x2a\xd2\x48\x05\x46\x35\x77\xd4\xa3\xfa\x7b\xea\xab\xb4\xd0\x69\x26\x5f\x54\x73\xbc\x11\xff\x2a\xee\xf7\x88\x7f\x19\xdc\x71\x6e\xca\xda\x66\xde\xf3\x51\x99\x2c\x2e\xc7\xa6\xae\x75\xe3\x96\x62\xbb\x59\x8e\xf8\x5e\x3d\xe6\x87\x53\x0b\xda\x6c\x54\x92\xc0\xc0\xfa\xa2\x84\x46\xda\xa8\x28\x12\xda\xd5\x25\x39\x37\xe5\x53\xa7\x50\xca\x3d\x0e\x7a\x9e\x7b\xd3\x2b\xcf\x10\x17\xe7\xb9\x37\xd6\x84\x09\x0d\x63\x6a\xa9\xa2\xb0\xca\xa4\x71\x79\xbf\xe4\xfd\x7d\xf2\x72\xae\xbb\x69\x05\x06\x11\xed\xe9\xf4\xdd\x62\x7a\x2b\xab\xce\xf8\x85\x31\x79\x31\x12\x25\xc5\xa5\xc3\x93\x61\x25\x34\xad\xeb\xb2\x5e\xd6\x8c\xf0\xbd\x68\xe4\xe6\x2f\x49\x9c\xa2\xa8\x30\x3f\xec\x05\x77\xc4\x0a\x19\xf3\x78\x2f\x53\x8e\x65\x53\xb7\x44\x6d\x49\xbf\x8d\x14\xa0\x5f\x18\xf3\xc5\xfe\xb4\xe2\x2b\xad\x6c\x50\x1b\x62\x5c\x0e\xde\x2a\x5d\x36\xe2\x97\x67\xc3\xa3\xe2\x0b\x7d\x1b\x59\x15\xa7\xed\xb4\x5e\x73\xf2\xc5\xa5\xd1\x2f\x1d\xc4\xb0\x8f\x1e\x73\x07\x8a\x45\x6f\x89\x92\x0d\x4c\xb9\x91\x4f\x8d\x48\x95\x5d\x4e\xfd\xea\x2c\x7a\x6d\x72\x17\x8c\x88\xc5\x0b\x67\xb3\x9f\xa8\x42\x4e\xc9\x61\x16\x5f\xa7\x64\x6e\xa8\x8f\x16\x03\x77\x89\x1e\x3e\xe5\xf3\x4f\x2f\xbb\x20\xef\xa6\xc9\xba\xe9\x45\x16\x05\xdf\x34\xa1\x37\xbd\xcc\x82\x04\xac\x93\x7e\xcd\x47\xb4\x98\xbb\x81\x18\x9c\x5e\x7c\xa5\x60\x6c\x02\x81\xd3\x8b\xaf\x02\xc5\x12\x2f\xc0\x99\xef\xaf\xdf\x2b\xdd\x2c\x86\x0f\x3e\x91\x0e\xb3\xe9\x45\xf6\x33\xdd\x6a\xd3\x2a\x30\x27\xb3\xfa\x33\x66\x8e\x92\xf7\x0b\xb9\xc0\xa4\x55\x8c\x4e\x8b\xe6\x4d\x0b\x1f\x88\x0d\x50\x65\x9e\xd9\xd9\x40\x81\x7c\x5c\x40\xb3\x38\x19\xd4\x3e\x72\xa4\xc8\xe7\x59\xd9\x42\x9d\xd4\x17\x62\xc1\x5d\x7c\x4b\xea\x06\x27\xbd\xeb\xa5\x2a\x88\x24\x89\x58\x41\x0b\x8a\x29\x69\xe8\x6e\xdd\x66\x76\x18\x1e\xd7\x35\xae\xd4\x99\x12\x1b\x79\x66\xb7\xf4\xb0\xce\x49\xdb\xc5\xaf\xa8\x45\x2c\xa5\x9a\x1b\x97\x5a\x2e\xb2\x6c\xbc\x8b\x95\xef\xfe\x2c\x31\x18\xad\xc2\x3d\xea\xe6\x75\xd3\x60\x8f\xe5\x85\x64\x2c\xe6\x93\x14\x66\xf6\xf3\x9c\xd2\x86\xbd\x2b\x7f\xd4\xb4\x07\x27\x4d\x69\x4c\x5f\xcf\xb8\x99\xe8\xb9\x1d\x3a\xc1\x91\x11\x1c\x19\xc1\x91\x11\x1c\x19\x65\x7f\xcf\xcc\xc0\x07\x47\x46\x70\x64\x54\xd6\x70\x38\x32\x82\x23\xa3\xda\xa2\xe0\xc8\x08\x8e\x8c\xe0\xc8\xa8\xf0\x83\x23\xa3\x19\x06\x15\x8e\x8c\xd2\x11\x81\x23\xa3\xe6\x65\xc3\x91\x51\x79\xb9\x70\x64\x04\x47\x46\x70\x64\x04\x47\x46\x70\x64\x04\x47\x46\x70\x64\xf4\x0a\x8f\x8c\xfe\x40\xe8\xf7\xc6\x1f\xf1\xa9\x51\x67\x1f\x3d\xca\x33\x24\x65\xe8\x7f\x3f\xe8\x0e\x76\xbb\x7d\x4b\x9e\x0b\x74\xf6\xb3\x51\x4c\x1b\xd9\x84\x7b\xdd\xbe\x15\x60\x6a\x8f\x49\xd0\x57\xe7\x04\x76\x65\xee\x3d\x59\xed\x1f\xb2\xe6\xce\xe7\x3d\xae\xbf\xe7\x90\xde\x32\xdd\x82\x8e\xa8\xa3\x9f\x34\x67\x82\x7f\x9d\x7f\x3e\xd6\xc9\x44\x51\xfd\xee\x66\x77\x53\x37\x41\xbc\x54\x15\x1b\x09\xc4\xeb\xa4\x6f\xb2\xb0\xee\x20\x6e\xdf\x20\x29\xd7\x21\x7e\x40\x6c\x1c\x92\xea\xd2\xd3\x24\xd3\xeb\x18\x2c\xb2\xc1\x9b\x8b\x2c\x6c\xab
\xae\xb0\xad\xee\xa0\xbe\x30\x91\x20\x53\xdc\x76\x75\x71\x83\x6e\xbf\xdb\x9f\x56\xdc\x56\xb6\xb8\x9d\xee\x4e\xf2\x6d\xac\x29\x83\xb8\x37\xa5\xdf\xdd\x77\x99\xb2\xf7\x16\x38\x8a\x7b\xdd\x41\xbf\xac\x9d\x8b\x9d\x45\x7b\xdd\xc1\xd2\x67\xea\xbb\x05\x8e\xca\xbb\xee\xdb\x65\x35\x57\x23\x84\x4e\xa0\x49\x3d\x53\xbc\xe0\x31\x58\x6c\x76\xc5\xfc\x96\x53\xe9\x6d\xf7\x97\x2c\x5f\x4e\xb3\x7e\xfc\x74\xc7\x7c\x3a\x28\x7d\xba\x59\xfa\x34\x29\x77\x37\x7e\xaa\xa3\x3c\xcd\xfa\xe2\x66\x7e\x3e\xae\x6e\x62\xdf\x2c\x6a\xcb\x9c\x45\xe5\xa5\x66\x92\x14\x90\xf2\x88\xd9\x77\x24\x30\xf1\x52\xc3\xe5\xbf\xe4\xd0\x0a\x28\xd2\xed\x95\x7f\x6c\x9a\x7f\x6c\xa5\x7f\xbc\xed\xf6\x8d\xbf\xf6\xba\xfd\xdd\xcc\x5f\xef\xd2\xbf\xde\xa9\x94\x7f\x20\xf4\xef\x0d\x5d\xdd\x60\xb5\xd5\x6d\xce\x59\xdd\xdb\x4c\x75\x6f\x33\xd5\xb5\x6c\xca\xd6\xf3\x69\xca\xb6\xd9\x94\x55\x54\xb8\xb3\xea\x0a\x77\x57\x5d\xe1\xdb\x15\x57\xb8\x37\xeb\x6c\x32\xca\x78\xb7\x9a\xb5\x58\xc0\xa1\x1f\xca\x19\x40\x6b\xa4\xda\xd4\x69\xaa\x70\x3b\x89\x44\x20\xa1\xed\x74\xf6\x11\x8d\x3c\x4f\x15\x2c\x54\x52\xec\xbb\x85\x67\x1e\x09\xe3\x4c\xa8\x83\x1d\x27\x20\x9c\x2b\xd9\xd1\x4f\x55\x0b\xd4\xc1\x94\xd1\x87\x09\x8b\xb8\x25\x54\x67\x91\xe0\x16\x7b\x9c\x24\xaf\xa3\x70\x4c\x68\xe8\xda\x52\xa1\xb6\x42\x76\x47\xa8\xf5\x93\xdc\x8c\x19\xbb\x13\x89\xc3\x20\xca\xa4\x4d\x74\x6f\x6b\xc2\x1c\xa1\x07\x77\x7e\xe8\xc4\x3a\x91\x4d\x82\xd0\x72\xdc\x40\xbc\xfa\xd7\xd9\xf9\xf1\x87\xe1\xff\xbe\x3e\x3b\xb8\xfc\xf4\xef\xc4\xca\xa6\x1b\xdf\xf3\xef\xdc\x24\xd3\x48\x6e\x83\x2c\x9f\x04\xd6\xdf\x8c\x17\x5a\xa9\xad\x8f\xe5\xe5\x32\x3f\xec\xd9\xd4\xed\xdd\xb8\xd4\x4c\x2f\x36\x4b\xe5\x19\x48\x68\xcb\x0c\x94\x84\x5d\x27\xce\x42\xe8\x2d\x0b\x6c\x62\x49\x4a\x13\xec\x79\xcc\xc6\xd2\xa6\x26\xb2\xff\xf9\x67\x92\xea\x9e\xd0\xd0\xfa\xdb\x57\x23\x1d\x3f\xbd\x25\x38\x8c\x02\x62\x8d\x70\x48\xe4\x9b\x4f\x0f\x3e\x09\xbe\x27\x5c\x89\xef\xc5\x28\x6e\xe8\x49\xf0\xf1\xeb\xc5\xc1\x7b\x73\x58\xe5\x11\x82\xe5\x47\x9e\x67\xf9\x01\x53\xfc\x0c\x0e\xc1\x8e\xe7\x52\x59\xfb\x56\x7f\x12\x27\x95\xdb\x93\x80\x48\xae\x0f\x31\x4b\x3a\xb6\x1f\xbd\xdf\xe9\xf7\x27\x1b\x8a\x40\x42\xfc\xfb\xab\xbb\x41\xfc\x31\x99\x90\x00\x7b\x16\x0f\x59\x80\x47\xe4\xfd\xe0\x63\x32\xd6\x13\x7c\x47\x2c\xd7\xd7\x26\xc7\x28\x74\x3d\xcb\x1e\x63\x57\xce\xc7\xcc\xe7\xd6\x06\x1c\x7d\x00\x22\x6b\xa3\x49\x29\x01\xc1\x8e\xc5\xa8\xf7\x60\xf9\x2c\x08\x33\xa3\x21\x76\xae\xde\xbd\x1c\xff\xec\xe0\xf1\x30\x20\x78\xe2\xd2\x91\x78\x47\x13\x96\x71\x8f\x58\x9a\xce\x23\xd7\x5b\xb5\x0f\x2b\xf4\x77\xd0\x37\x3a\xbc\xf9\xb1\xac\xbb\x9b\x69\x77\x43\x8f\x5b\xb6\xeb\x8f\x49\x60\xf1\xc8\xd5\xdf\xe7\xf2\xcb\xc5\xf5\xf1\xe1\xd1\xa7\x63\xf1\xff\x17\x07\xd7\x3f\x86\x97\x9f\xae\x0f\x8e\x2f\xae\x07\x9b\x7b\xd7\x1f\x0f\xbf\x5e\x5f\x7c\x3a\xd8\xdc\xd9\xdd\xa8\x4c\xb7\xb9\xb3\x1b\xa7\xdb\xda\xdb\x2e\x4f\x77\xf8\xe9\xe0\xf0\xd3\xc1\x66\xff\xfa\xec\xf4\xcb\x3f\x07\x5b\xfd\x1d\x23\xd9\x79\xa3\x4a\xcf\x1b\x55\x79\x5e\x59\x61\x3c\x06\xf7\x52\x59\x4c\xfe\x92\xc7\x12\xfa\xb3\x36\x5b\xaa\x2a\x8b\x3e\x09\x13\x10\xa7\xd4\x5a\x35\x21\xfd\x80\xfd\x7a\x48\xa1\x88\x50\x31\xb3\x2c\x87\x07\x85\x65\x5c\x58\x27\x3f\x5c\x7a\x7a\x4f\x02\x0f\x3f\x64\x56\x84\x66\x91\x91\x84\xd5\x96\x01\x6d\x89\xf3\x44\x9c\x50\x56\x9d\x20\xd1\x1d\x09\x28\xf1\xe4\x4e\x3e\xd7\xf3\x5c\x8b\x0f\x13\xbe\xcb\x0c\xaa\x26\x0e\x7e\xfa\x69\x66\xef\xb5\x0b\x08\x0d\x08\x0d\x08\x0d\x08\x0d\x08\xfd\x4c\x11\xfa\x2d\x20\x34\x20\x34\x20\x34\x20\x34\x20\xf4\xf3\x42\x68\x6d\x0e\x39\x74\xb9\x72\x06\x39\xc3\x01\x9e\x24\xe6\x8f\xd4\xa4\xab\xc0\xfb\x86\x50\x7b\x3c\xc1\xc1\x9d\x61\xe8\x0e\xee\x88\x65\xbb\xdc\x1a\x74\xb7\x73\x27\x22\xad\xb2\xa5\x0d\xf9\x2b\x97\xd
a\x34\x11\xc7\x39\x12\xd3\xbf\x4b\x3f\x27\xe1\x23\x99\x13\x9c\x81\x71\x3e\xa3\x72\xed\x34\xc9\xb5\x93\xe6\x32\x5b\xd8\xae\xbe\x34\x67\x8b\x3a\xff\x40\xbf\xff\xf8\xfd\xff\x07\x00\x00\xff\xff\x9a\x87\xab\x55\x6f\x0c\x07\x00") + +func dataDataJsonBytes() ([]byte, error) { + return bindataRead( + _dataDataJson, + "data/data.json", + ) +} + +func dataDataJson() (*asset, error) { + bytes, err := dataDataJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/data.json", size: 461935, mode: os.FileMode(420), modTime: time.Unix(1557785965, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "data/data.json": dataDataJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"data": &bintree{nil, map[string]*bintree{
+		"data.json": &bintree{dataDataJson, map[string]*bintree{}},
+	}},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+} diff --git a/data/data.json b/data/data.json new file mode 100755 index 000000000..971e62459 --- /dev/null +++ b/data/data.json @@ -0,0 +1,4084 @@ +{ + "K8sVersionServiceOptions": { + "v1.10": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.11": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.12": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" 
+ }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.13": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.14": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": 
"authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.14.10-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.14.9-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.15": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.15.10-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": 
"true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.15.6-rancher1-2": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + 
"pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.15.7-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.15.9-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": 
"authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16": { + "etcd": null, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16.3-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16.4-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + 
"anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16.6-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + 
"address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16.6-rancher1-2": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + 
"enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.16.7-rancher1-1": { + "etcd": { + "client-cert-auth": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.17": { + "etcd": { + "client-cert-auth": "true", + "enable-v2": "true", + "peer-client-cert-auth": "true" + }, + "kubeapi": { + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": 
"X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + }, + "v1.9": { + "etcd": null, + "kubeapi": { + "admission-control": "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction", + "allow-privileged": "true", + "anonymous-auth": "false", + "bind-address": "0.0.0.0", + "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction", + "insecure-port": "0", + "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", + "profiling": "false", + "requestheader-extra-headers-prefix": "X-Remote-Extra-", + "requestheader-group-headers": "X-Remote-Group", + "requestheader-username-headers": "X-Remote-User", + "runtime-config": "authorization.k8s.io/v1beta1=true", + "secure-port": "6443", + "service-account-lookup": "true", + "storage-backend": "etcd3", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + }, + "kubelet": { + "address": "0.0.0.0", + "allow-privileged": "true", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cgroups-per-qos": "True", + "cni-bin-dir": "/opt/cni/bin", + "cni-conf-dir": "/etc/cni/net.d", + "enforce-node-allocatable": "", + "event-qps": "0", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "/etc/resolv.conf", + "streaming-connection-idle-timeout": "30m", + 
"tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "healthz-bind-address": "127.0.0.1", + "v": "2" + }, + "kubeController": { + "address": "0.0.0.0", + "allocate-node-cidrs": "true", + "allow-untagged-cloud": "true", + "configure-cloud-routes": "false", + "enable-hostpath-provisioner": "false", + "leader-elect": "true", + "node-monitor-grace-period": "40s", + "pod-eviction-timeout": "5m0s", + "profiling": "false", + "terminated-pod-gc-threshold": "1000", + "v": "2" + }, + "scheduler": { + "address": "0.0.0.0", + "leader-elect": "true", + "profiling": "false", + "v": "2" + } + } + }, + "K8sVersionRKESystemImages": { + "v1.10.0-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.4", + "nginxProxy": "rancher/rke-tools:v0.1.4", + "certDownloader": "rancher/rke-tools:v0.1.4", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.4", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.0-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.10.1-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.8", + "nginxProxy": "rancher/rke-tools:v0.1.8", + "certDownloader": "rancher/rke-tools:v0.1.8", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.1-rancher2", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.10.11-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": 
"rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.11-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.10.12-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": "rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.12-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.10.3-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.10", + "nginxProxy": "rancher/rke-tools:v0.1.10", + "certDownloader": "rancher/rke-tools:v0.1.10", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.10", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.3-rancher2", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + 
"ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.10.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.10", + "nginxProxy": "rancher/rke-tools:v0.1.10", + "certDownloader": "rancher/rke-tools:v0.1.10", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.10", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.10.5-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": "rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.10.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.1-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": "rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": 
"weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.2-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": "rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.2-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": 
"rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.6-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.15", + "nginxProxy": "rancher/rke-tools:v0.1.15", + "certDownloader": "rancher/rke-tools:v0.1.15", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.8-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.15", + "nginxProxy": "rancher/rke-tools:v0.1.15", + "certDownloader": "rancher/rke-tools:v0.1.15", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": 
"rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.8-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.9-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.15", + "nginxProxy": "rancher/rke-tools:v0.1.15", + "certDownloader": "rancher/rke-tools:v0.1.15", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.9-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.28", + "nginxProxy": "rancher/rke-tools:v0.1.28", + "certDownloader": "rancher/rke-tools:v0.1.28", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.11.9-rancher1-3": { 
+ "etcd": "rancher/coreos-etcd:v3.2.18", + "alpine": "rancher/rke-tools:v0.1.16-2", + "nginxProxy": "rancher/rke-tools:v0.1.16-2", + "certDownloader": "rancher/rke-tools:v0.1.16-2", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.11.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + }, + "v1.12.0-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.0-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.1-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + 
"calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.10-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.40", + "nginxProxy": "rancher/rke-tools:v0.1.40", + "certDownloader": "rancher/rke-tools:v0.1.40", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.12.10-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.42", + "nginxProxy": "rancher/rke-tools:v0.1.42", + "certDownloader": "rancher/rke-tools:v0.1.42", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.12.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.18", + "nginxProxy": "rancher/rke-tools:v0.1.18", + "certDownloader": "rancher/rke-tools:v0.1.18", + "kubernetesServicesSidecar": 
"rancher/rke-tools:v0.1.18", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.4-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": 
"weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.5-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.6-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.6-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "coredns/coredns:1.2.2", 
+ "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.12.7-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.7-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "coredns/coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.12.7-rancher1-3": { + 
"etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.28", + "nginxProxy": "rancher/rke-tools:v0.1.28", + "certDownloader": "rancher/rke-tools:v0.1.28", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.12.7-rancher1-4": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16-2", + "nginxProxy": "rancher/rke-tools:v0.1.16-2", + "certDownloader": "rancher/rke-tools:v0.1.16-2", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.12.9-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.34", + "nginxProxy": "rancher/rke-tools:v0.1.34", + "certDownloader": "rancher/rke-tools:v0.1.34", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34", + "kubedns": "rancher/k8s-dns-kube-dns:1.14.13", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.12.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.1.3", + "calicoCni": "rancher/calico-cni:v3.1.3", + "calicoCtl": 
"rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.3", + "canalCni": "rancher/calico-cni:v3.1.3", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.1-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.1-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "coredns": "coredns/coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.10-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16-2", + "nginxProxy": "rancher/rke-tools:v0.1.16-2", + "certDownloader": "rancher/rke-tools:v0.1.16-2", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": 
"rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.10-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.42", + "nginxProxy": "rancher/rke-tools:v0.1.42", + "certDownloader": "rancher/rke-tools:v0.1.42", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.11-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.11-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": 
"rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.12-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.12-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.12-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.12-rancher2", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.4-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": 
"rancher/hyperkube:v1.13.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.4-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.27", + "nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "coredns/coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.5-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.27", + 
"nginxProxy": "rancher/rke-tools:v0.1.27", + "certDownloader": "rancher/rke-tools:v0.1.27", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "coredns/coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.5-rancher1-3": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.28", + "nginxProxy": "rancher/rke-tools:v0.1.28", + "certDownloader": "rancher/rke-tools:v0.1.28", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.7-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.34", + "nginxProxy": "rancher/rke-tools:v0.1.34", + "certDownloader": "rancher/rke-tools:v0.1.34", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": 
"rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.13.9-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.2.24", + "alpine": "rancher/rke-tools:v0.1.16", + "nginxProxy": "rancher/rke-tools:v0.1.16", + "certDownloader": "rancher/rke-tools:v0.1.16", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0", + "flannelCni": "rancher/coreos-flannel-cni:v0.3.0", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause-amd64:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.3.1" + }, + "v1.13.9-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.2.24-rancher1", + "alpine": "rancher/rke-tools:v0.1.40", + "nginxProxy": "rancher/rke-tools:v0.1.40", + "certDownloader": "rancher/rke-tools:v0.1.40", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "coredns": "rancher/coredns-coredns:1.2.6", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0", + "kubernetes": "rancher/hyperkube:v1.13.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.1-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.28", + "nginxProxy": "rancher/rke-tools:v0.1.28", + "certDownloader": "rancher/rke-tools:v0.1.28", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": 
"rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "coredns/coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.1-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.28", + "nginxProxy": "rancher/rke-tools:v0.1.28", + "certDownloader": "rancher/rke-tools:v0.1.28", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.10-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoControllers": "rancher/calico-kube-controllers:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": 
"rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.34", + "nginxProxy": "rancher/rke-tools:v0.1.34", + "certDownloader": "rancher/rke-tools:v0.1.34", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.40", + "nginxProxy": "rancher/rke-tools:v0.1.40", + "certDownloader": "rancher/rke-tools:v0.1.40", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.6-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.42", + "nginxProxy": "rancher/rke-tools:v0.1.42", + "certDownloader": "rancher/rke-tools:v0.1.42", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + 
"kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.7-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.8-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.8-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": 
"weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.8-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.8-rancher2", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.9-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.14.9-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": 
"rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.14.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.10.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.4.0", + "calicoCni": "rancher/calico-cni:v3.4.0", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.4.0", + "canalCni": "rancher/calico-cni:v3.4.0", + "canalFlannel": "rancher/coreos-flannel:v0.10.0", + "weaveNode": "weaveworks/weave-kube:2.5.0", + "weaveCni": "weaveworks/weave-npc:2.5.0", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.1" + }, + "v1.15.0-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.34", + "nginxProxy": "rancher/rke-tools:v0.1.34", + "certDownloader": "rancher/rke-tools:v0.1.34", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.0-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3" + }, + "v1.15.10-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.10-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": 
"weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.2-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.40", + "nginxProxy": "rancher/rke-tools:v0.1.40", + "certDownloader": "rancher/rke-tools:v0.1.40", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3" + }, + "v1.15.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.42", + "nginxProxy": "rancher/rke-tools:v0.1.42", + "certDownloader": "rancher/rke-tools:v0.1.42", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3" + }, + "v1.15.4-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": 
"rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3" + }, + "v1.15.4-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": 
"rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3" + }, + "v1.15.5-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.5-rancher2-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.5-rancher2", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": 
"rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.6-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.7-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.15.9-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.10-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", 
+ "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "coredns": "rancher/coredns-coredns:1.3.1", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0", + "kubernetes": "rancher/hyperkube:v1.15.9-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.7.4", + "calicoCni": "rancher/calico-cni:v3.7.4", + "calicoControllers": "rancher/calico-kube-controllers:v3.7.4", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.7.4", + "canalCni": "rancher/calico-cni:v3.7.4", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.3", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.1-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.1-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.8.1", + "calicoCni": "rancher/calico-cni:v3.8.1", + "calicoControllers": "rancher/calico-kube-controllers:v3.8.1", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "canalNode": "rancher/calico-node:v3.8.1", + "canalCni": "rancher/calico-cni:v3.8.1", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.2-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.50", + "nginxProxy": "rancher/rke-tools:v0.1.50", + "certDownloader": "rancher/rke-tools:v0.1.50", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + 
"flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.8.1", + "calicoCni": "rancher/calico-cni:v3.8.1", + "calicoControllers": "rancher/calico-kube-controllers:v3.8.1", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "canalNode": "rancher/calico-node:v3.8.1", + "canalCni": "rancher/calico-cni:v3.8.1", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.2-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.2-rancher2", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.8.1", + "calicoCni": "rancher/calico-cni:v3.8.1", + "calicoControllers": "rancher/calico-kube-controllers:v3.8.1", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "canalNode": "rancher/calico-node:v3.8.1", + "canalCni": "rancher/calico-cni:v3.8.1", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.8.1", + "calicoCni": "rancher/calico-cni:v3.8.1", + "calicoControllers": "rancher/calico-kube-controllers:v3.8.1", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "canalNode": "rancher/calico-node:v3.8.1", + "canalCni": 
"rancher/calico-cni:v3.8.1", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.4-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.4-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.6-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": 
"rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.6-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.6-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.16.7-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.3.15-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.2", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.16.7-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.4", + "windowsPodInfraContainer": 
"rancher/kubelet-pause:v0.1.3" + }, + "v1.17.0-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.4.3-rancher1", + "alpine": "rancher/rke-tools:v0.1.51", + "nginxProxy": "rancher/rke-tools:v0.1.51", + "certDownloader": "rancher/rke-tools:v0.1.51", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.5", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.17.0-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.8.1", + "calicoCni": "rancher/calico-cni:v3.8.1", + "calicoControllers": "rancher/calico-kube-controllers:v3.8.1", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "canalNode": "rancher/calico-node:v3.8.1", + "canalCni": "rancher/calico-cni:v3.8.1", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.6", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.17.0-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.4.3-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.5", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.17.0-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.6", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.17.2-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.4.3-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": 
"rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.5", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.17.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.6", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.17.2-rancher1-2": { + "etcd": "rancher/coreos-etcd:v3.4.3-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.5", + "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.17.2-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.6", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.17.3-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.4.3-rancher1", + "alpine": "rancher/rke-tools:v0.1.52", + "nginxProxy": "rancher/rke-tools:v0.1.52", + "certDownloader": "rancher/rke-tools:v0.1.52", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52", + "kubedns": "rancher/k8s-dns-kube-dns:1.15.0", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0", + "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1", + "coredns": "rancher/coredns-coredns:1.6.5", + "corednsAutoscaler": 
"rancher/cluster-proportional-autoscaler:1.7.1", + "kubernetes": "rancher/hyperkube:v1.17.3-rancher1", + "flannel": "rancher/coreos-flannel:v0.11.0-rancher1", + "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5", + "calicoNode": "rancher/calico-node:v3.10.2", + "calicoCni": "rancher/calico-cni:v3.10.2", + "calicoControllers": "rancher/calico-kube-controllers:v3.10.2", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "canalNode": "rancher/calico-node:v3.10.2", + "canalCni": "rancher/calico-cni:v3.10.2", + "canalFlannel": "rancher/coreos-flannel:v0.11.0", + "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2", + "weaveNode": "weaveworks/weave-kube:2.5.2", + "weaveCni": "weaveworks/weave-npc:2.5.2", + "podInfraContainer": "rancher/pause:3.1", + "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1", + "metricsServer": "rancher/metrics-server:v0.3.6", + "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3" + }, + "v1.8.11-rancher2-1": { + "etcd": "rancher/coreos-etcd:v3.0.17", + "alpine": "rancher/rke-tools:v0.1.8", + "nginxProxy": "rancher/rke-tools:v0.1.8", + "certDownloader": "rancher/rke-tools:v0.1.8", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.5", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.5", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.5", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.8.11-rancher2", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.0", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.9.5-rancher1-1": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.4", + "nginxProxy": "rancher/rke-tools:v0.1.4", + "certDownloader": "rancher/rke-tools:v0.1.4", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.4", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.9.5-rancher1", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.0", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.9.7-rancher2-1": { + 
"etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.8", + "nginxProxy": "rancher/rke-tools:v0.1.8", + "certDownloader": "rancher/rke-tools:v0.1.8", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.9.7-rancher2", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.0", + "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4" + }, + "v1.9.7-rancher2-2": { + "etcd": "rancher/coreos-etcd:v3.1.12", + "alpine": "rancher/rke-tools:v0.1.13", + "nginxProxy": "rancher/rke-tools:v0.1.13", + "certDownloader": "rancher/rke-tools:v0.1.13", + "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13", + "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7", + "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7", + "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7", + "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0", + "kubernetes": "rancher/hyperkube:v1.9.7-rancher2", + "flannel": "rancher/coreos-flannel:v0.9.1", + "flannelCni": "rancher/coreos-flannel-cni:v0.2.0", + "calicoNode": "rancher/calico-node:v3.1.1", + "calicoCni": "rancher/calico-cni:v3.1.1", + "calicoCtl": "rancher/calico-ctl:v2.0.0", + "canalNode": "rancher/calico-node:v3.1.1", + "canalCni": "rancher/calico-cni:v3.1.1", + "canalFlannel": "rancher/coreos-flannel:v0.9.1", + "weaveNode": "weaveworks/weave-kube:2.1.2", + "weaveCni": "weaveworks/weave-npc:2.1.2", + "podInfraContainer": "rancher/pause-amd64:3.0", + "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1", + "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4", + "metricsServer": "rancher/metrics-server-amd64:v0.2.1" + } + }, + "K8sVersionedTemplates": { + "calico": { + "\u003e=1.13.0-rancher0 \u003c1.15.0-rancher0": "calico-v1.13", + "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "calico-v1.15", + "\u003e=1.16.0-alpha \u003c1.16.4-rancher1": "calico-v1.16", + "\u003e=1.16.4-rancher1": "calico-v1.17", + "\u003e=1.8.0-rancher0 \u003c1.13.0-rancher0": "calico-v1.8" + }, + "canal": { + "\u003e=1.13.0-rancher0 \u003c1.15.0-rancher0": "canal-v1.13", + "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "canal-v1.15", + "\u003e=1.16.0-alpha \u003c1.16.4-rancher1": "canal-v1.16", + "\u003e=1.16.4-rancher1": "canal-v1.17", + "\u003e=1.8.0-rancher0 \u003c1.13.0-rancher0": "canal-v1.8" + }, + "coreDNS": { + "\u003e=1.16.0-alpha \u003c1.17.0-alpha": "coredns-v1.16", + "\u003e=1.17.0-alpha": "coredns-v1.17", + "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "coredns-v1.8" + }, + "flannel": { + "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "flannel-v1.15", + "\u003e=1.16.0-alpha": "flannel-v1.16", + "\u003e=1.8.0-rancher0 \u003c1.15.0-rancher0": "flannel-v1.8" + }, + "kubeDNS": { + "\u003e=1.16.0-alpha": 
"kubedns-v1.16", + "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "kubedns-v1.8" + }, + "metricsServer": { + "\u003e=1.8.0-rancher0": "metricsserver-v1.8" + }, + "nginxIngress": { + "\u003e=1.13.10-rancher1-3 \u003c1.14.0-rancher0": "nginxingress-v1.15", + "\u003e=1.14.0-rancher0 \u003c=1.14.6-rancher1-1": "nginxingress-v1.8", + "\u003e=1.14.6-rancher2 \u003c1.15.0-rancher0": "nginxingress-v1.15", + "\u003e=1.15.0-rancher0 \u003c=1.15.3-rancher1-1": "nginxingress-v1.8", + "\u003e=1.15.3-rancher2": "nginxingress-v1.15", + "\u003e=1.8.0-rancher0 \u003c1.13.10-rancher1-3": "nginxingress-v1.8" + }, + "templateKeys": { + "calico-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n## start rbac here\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n## end rbac here\n\n---\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # To enable Typha, set this to \"calico-typha\" *and* set a non-zero value for Typha replicas\n # below. We recommend using Typha if you have more than 50 nodes. 
Above 100 nodes it is\n # essential.\n typha_service_name: \"none\"\n # Configure the Calico backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n serviceAccountName: calico-node\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - 
mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Typha support: controlled by the ConfigMap.\n - name: FELIX_TYPHAK8SSERVICENAME\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: typha_service_name\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: 
felixconfiguration\n\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n\n\n{{if ne .CloudProvider \"none\"}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: {{.CloudProvider}}-ippool\n namespace: kube-system\ndata:\n {{.CloudProvider}}-ippool: |-\n apiVersion: projectcalico.org/v3\n kind: IPPool\n metadata:\n name: ippool-ipip-1\n spec:\n cidr: {{.ClusterCIDR}}\n ipipMode: Always\n natOutgoing: true\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: calicoctl\n namespace: kube-system\nspec:\n hostNetwork: true\n restartPolicy: OnFailure\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: calicoctl\n image: {{.Calicoctl}}\n command: [\"/bin/sh\", \"-c\", \"calicoctl apply -f {{.CloudProvider}}-ippool.yaml\"]\n env:\n - name: DATASTORE_TYPE\n value: kubernetes\n volumeMounts:\n - name: ippool-config\n mountPath: /root/\n volumes:\n - name: ippool-config\n configMap:\n name: {{.CloudProvider}}-ippool\n items:\n - key: {{.CloudProvider}}-ippool\n path: {{.CloudProvider}}-ippool.yaml\n # Mount in the etcd TLS secrets.\n{{end}}\n", + "calico-v1.15": "\n{{if eq .RBACConfig 
\"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n # Pods are queried to check for existence.\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n # IPAM resources are manipulated when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n # Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n # These 
permissions are required for Calico CNI to perform IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n verbs:\n - get\n # Block affinities must also be watchable by confd for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration needs to get daemonsets. These permissions can be\n # removed if not upgrading from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n - daemonsets\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: 
Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets 
evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n hostNetwork: true\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-node\n{{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container performs upgrade from host-local IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, or if you have already\n # upgraded to use calico-ipam.\n - name: upgrade-ipam\n image: {{.CNIImage}}\n command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico-node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"info\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n volumes:\n # Used by calico-node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Mount in the directory for host-local IPAM allocations. This is\n # used when upgrading from host-local to calico-ipam, and can be removed\n # if not using the upgrade-ipam init container.\n - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n# See https://github.com/projectcalico/kube-controllers\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\nspec:\n # The controller can only have a single active instance.\n replicas: 1\n strategy:\n type: Recreate\n template:\n metadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-kube-controllers\n{{end}}\n containers:\n - name: calico-kube-controllers\n image: {{.ControllersImage}}\n env:\n # Choose which controllers to run.\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n", + "calico-v1.16": "\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers 
serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n # Pods are queried to check for existence.\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n # IPAM resources are manipulated when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n # Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n # These permissions are required for Calico CNI to perform IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n - apiGroups: 
[\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n verbs:\n - get\n # Block affinities must also be watchable by confd for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration needs to get daemonsets. These permissions can be\n # removed if not upgrading from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n - daemonsets\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n---\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: 
bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n hostNetwork: 
true\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-node\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container performs upgrade from host-local IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, or if you have already\n # upgraded to use calico-ipam.\n - name: upgrade-ipam\n image: {{.CNIImage}}\n command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs calico-node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. 
Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"info\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n volumes:\n # Used by calico-node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Mount in the directory for host-local IPAM allocations. This is\n # used when upgrading from host-local to calico-ipam, and can be removed\n # if not using the upgrade-ipam init container.\n - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n metadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-kube-controllers\n{{end}}\n 
priorityClassName: system-cluster-critical\n containers:\n - name: calico-kube-controllers\n image: {{.ControllersImage}}\n env:\n # Choose which controllers to run.\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n", + "calico-v1.17": "\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n # Pods are queried to check for existence.\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n # IPAM resources are manipulated when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n # Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - 
nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n # These permissions are required for Calico CNI to perform IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n verbs:\n - get\n # Block affinities must also be watchable by confd for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration needs to get daemonsets. These permissions can be\n # removed if not upgrading from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n - daemonsets\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n---\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n hostNetwork: true\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-node\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container performs upgrade from host-local IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, or if you have already\n # upgraded to use calico-ipam.\n - name: upgrade-ipam\n image: {{.CNIImage}}\n command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n 
command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs calico-node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"info\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n volumes:\n # Used by calico-node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Mount in the directory for host-local IPAM allocations. This is\n # used when upgrading from host-local to calico-ipam, and can be removed\n # if not using the upgrade-ipam init container.\n - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n metadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-kube-controllers\n{{end}}\n priorityClassName: system-cluster-critical\n containers:\n - name: 
calico-kube-controllers\n image: {{.ControllersImage}}\n env:\n # Choose which controllers to run.\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n", + "calico-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n## start rbac here\n\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-node\nrules:\n - apiGroups: [\"\"]\n resources:\n - namespaces\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - update\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - list\n - watch\n - patch\n - apiGroups: [\"\"]\n resources:\n - services\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - update\n - watch\n - apiGroups: [\"extensions\"]\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - create\n - get\n - list\n - update\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n## end rbac here\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # To enable Typha, set this to \"calico-typha\" *and* set a non-zero value for Typha replicas\n # below. We recommend using Typha if you have more than 50 nodes. 
Above 100 nodes it is\n # essential.\n typha_service_name: \"none\"\n # The CNI network configuration to install on each node.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": 1500,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure calico/node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n serviceAccountName: calico-node\n terminationGracePeriodSeconds: 0\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPV6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n value: \"1440\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Enable IP-in-IP within Felix.\n - name: FELIX_IPINIPENABLED\n value: \"true\"\n # Typha support: controlled by the ConfigMap.\n - name: FELIX_TYPHAK8SSERVICENAME\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: typha_service_name\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n---\n\napiVersion: 
apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n\n\n{{if ne .CloudProvider \"none\"}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: {{.CloudProvider}}-ippool\n namespace: kube-system\ndata:\n {{.CloudProvider}}-ippool: |-\n apiVersion: projectcalico.org/v3\n kind: IPPool\n metadata:\n name: ippool-ipip-1\n spec:\n cidr: {{.ClusterCIDR}}\n ipipMode: Always\n natOutgoing: true\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: calicoctl\n namespace: kube-system\nspec:\n hostNetwork: true\n restartPolicy: OnFailure\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: calicoctl\n image: {{.Calicoctl}}\n command: [\"/bin/sh\", \"-c\", \"calicoctl apply -f {{.CloudProvider}}-ippool.yaml\"]\n env:\n - name: DATASTORE_TYPE\n value: kubernetes\n volumeMounts:\n - name: ippool-config\n 
mountPath: /root/\n volumes:\n - name: ippool-config\n configMap:\n name: {{.CloudProvider}}-ippool\n items:\n - key: {{.CloudProvider}}-ippool\n path: {{.CloudProvider}}-ippool.yaml\n # Mount in the etcd TLS secrets.\n{{end}}\n", + "canal-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the 
canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n serviceAccountName: canal\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. 
Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n", + "canal-v1.15": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration 
information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: 
cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.16": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: 
\"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: 
networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: 
system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume 
Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", + "canal-v1.17": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: 
crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: 
[\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - 
mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n 
hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", + "canal-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n# Calico Roles\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n - apiGroups: [\"\"]\n resources:\n - namespaces\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - update\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - list\n - watch\n - patch\n - apiGroups: [\"\"]\n resources:\n - services\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - update\n - watch\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - networkpolicies\n - clusterinformations\n - hostendpoints\n - globalnetworksets\n verbs:\n - create\n - get\n - list\n - update\n - watch\n\n---\n\n# Flannel roles\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n\n---\n\n# Bind the calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap can be used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or 
not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n serviceAccountName: canal\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # Disable IPV6 support in Felix.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - name: run\n mountPath: /run\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n - name: xtables-lock\n mountPath: /run/xtables.lock\n readOnly: false\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n 
hostPath:\n path: /etc/cni/net.d\n # Used by flannel.\n - name: run\n hostPath:\n path: /run\n - name: flannel-cfg\n configMap:\n name: canal-config\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy-only mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", + "coredns-v1.16": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: 
rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . \"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n 
spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", + "coredns-v1.17": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . 
\"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n replicas: 1\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n 
containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", + "coredns-v1.8": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n addonmanager.kubernetes.io/mode: Reconcile\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n addonmanager.kubernetes.io/mode: EnsureExists\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n Corefile: |\n .:53 {\n errors\n health\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n upstream\n fallthrough in-addr.arpa ip6.arpa\n ttl 30\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . 
\"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"CoreDNS\"\nspec:\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n 
requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1}}\n{{end}}\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", + "flannel-v1.15": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: ['psp.flannel.unprivileged']\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n{{end}}\n---\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: psp.flannel.unprivileged\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n privileged: false\n volumes:\n - configMap\n - secret\n - emptyDir\n - hostPath\n allowedHostPaths:\n - pathPrefix: \"/etc/cni/net.d\"\n - pathPrefix: \"/etc/kube-flannel\"\n - pathPrefix: \"/run/flannel\"\n readOnlyRootFilesystem: false\n # Users and groups\n runAsUser:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n fsGroup:\n rule: RunAsAny\n # Privilege Escalation\n allowPrivilegeEscalation: false\n defaultAllowPrivilegeEscalation: false\n # Capabilities\n allowedCapabilities: ['NET_ADMIN']\n defaultAddCapabilities: []\n requiredDropCapabilities: []\n # Host namespaces\n hostPID: false\n 
hostIPC: false\n hostNetwork: true\n hostPorts:\n - min: 0\n max: 65535\n # SELinux\n seLinux:\n # SELinux is unsed in CaaSP\n rule: 'RunAsAny'\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: kube-system\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\": \"cbr0\",\n \"cniVersion\":\"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: kube-system\n labels:\n tier: node\n k8s-app: flannel\nspec:\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n - operator: Exists\n {{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: flannel\n {{end}}\n containers:\n - name: kube-flannel\n image: {{.Image}}\n command:\n - /opt/bin/flanneld\n args:\n - --ip-masq\n - --kube-subnet-mgr\n {{- if .FlannelInterface}}\n - --iface={{.FlannelInterface}}\n {{end}}\n resources:\n requests:\n cpu: \"100m\"\n memory: \"50Mi\"\n limits:\n cpu: \"100m\"\n memory: \"50Mi\"\n securityContext:\n privileged: false\n capabilities:\n add: [\"NET_ADMIN\"]\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: run\n mountPath: /run\n - name: cni\n mountPath: /etc/cni/net.d\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n volumes:\n - name: run\n hostPath:\n path: /run\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n", + "flannel-v1.16": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: ['psp.flannel.unprivileged']\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: 
flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n{{end}}\n---\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: psp.flannel.unprivileged\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n privileged: false\n volumes:\n - configMap\n - secret\n - emptyDir\n - hostPath\n allowedHostPaths:\n - pathPrefix: \"/etc/cni/net.d\"\n - pathPrefix: \"/etc/kube-flannel\"\n - pathPrefix: \"/run/flannel\"\n readOnlyRootFilesystem: false\n # Users and groups\n runAsUser:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n fsGroup:\n rule: RunAsAny\n # Privilege Escalation\n allowPrivilegeEscalation: false\n defaultAllowPrivilegeEscalation: false\n # Capabilities\n allowedCapabilities: ['NET_ADMIN']\n defaultAddCapabilities: []\n requiredDropCapabilities: []\n # Host namespaces\n hostPID: false\n hostIPC: false\n hostNetwork: true\n hostPorts:\n - min: 0\n max: 65535\n # SELinux\n seLinux:\n # SELinux is unsed in CaaSP\n rule: 'RunAsAny'\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: kube-system\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\": \"cbr0\",\n \"cniVersion\":\"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: kube-system\n labels:\n tier: node\n k8s-app: flannel\nspec:\n selector:\n matchLabels:\n k8s-app: flannel\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n - operator: Exists\n {{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: flannel\n {{end}}\n containers:\n - name: kube-flannel\n image: {{.Image}}\n command:\n - /opt/bin/flanneld\n args:\n - --ip-masq\n - --kube-subnet-mgr\n {{- if .FlannelInterface}}\n - --iface={{.FlannelInterface}}\n {{end}}\n resources:\n requests:\n cpu: \"100m\"\n memory: \"50Mi\"\n limits:\n cpu: \"100m\"\n memory: \"50Mi\"\n securityContext:\n privileged: false\n capabilities:\n add: [\"NET_ADMIN\"]\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: run\n mountPath: /run\n - name: cni\n mountPath: /etc/cni/net.d\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n - name: install-cni\n image: {{.CNIImage}}\n command: 
[\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n volumes:\n - name: run\n hostPath:\n path: /run\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n", + "flannel-v1.8": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: flannel\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n{{- end}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: kube-flannel-cfg\n namespace: \"kube-system\"\n labels:\n tier: node\n app: flannel\ndata:\n cni-conf.json: |\n {\n \"name\":\"cbr0\",\n \"cniVersion\":\"0.3.1\",\n \"plugins\":[\n {\n \"type\":\"flannel\",\n \"delegate\":{\n \"forceAddress\":true,\n \"isDefaultGateway\":true\n }\n },\n {\n \"type\":\"portmap\",\n \"capabilities\":{\n \"portMappings\":true\n }\n }\n ]\n }\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n name: kube-flannel\n namespace: \"kube-system\"\n labels:\n tier: node\n k8s-app: flannel\nspec:\n template:\n metadata:\n labels:\n tier: node\n k8s-app: flannel\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n serviceAccountName: flannel\n containers:\n - name: kube-flannel\n image: {{.Image}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n cpu: 300m\n memory: 500M\n requests:\n cpu: 150m\n memory: 64M\n {{- if .FlannelInterface}}\n command: [\"/opt/bin/flanneld\",\"--ip-masq\",\"--kube-subnet-mgr\",\"--iface={{.FlannelInterface}}\"]\n {{- else}}\n command: [\"/opt/bin/flanneld\",\"--ip-masq\",\"--kube-subnet-mgr\"]\n {{- end}}\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: run\n mountPath: /run\n - name: cni\n mountPath: /etc/cni/net.d\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: 
kube-flannel-cfg\n key: cni-conf.json\n - name: CNI_CONF_NAME\n value: \"10-flannel.conflist\"\n volumeMounts:\n - name: cni\n mountPath: /host/etc/cni/net.d\n - name: host-cni-bin\n mountPath: /host/opt/cni/bin/\n hostNetwork: true\n tolerations:\n {{- if ge .ClusterVersion \"v1.12\" }}\n - operator: Exists\n effect: NoSchedule\n - operator: Exists\n effect: NoExecute\n {{- else }}\n - key: node-role.kubernetes.io/controlplane\n operator: Exists\n effect: NoSchedule\n - key: node-role.kubernetes.io/etcd\n operator: Exists\n effect: NoExecute\n {{- end }}\n - key: node.kubernetes.io/not-ready\n effect: NoSchedule\n operator: Exists\n volumes:\n - name: run\n hostPath:\n path: /run\n - name: cni\n hostPath:\n path: /etc/cni/net.d\n - name: flannel-cfg\n configMap:\n name: kube-flannel-cfg\n - name: host-cni-bin\n hostPath:\n path: /opt/cni/bin\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 20%\n{{end}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: flannel\n namespace: kube-system\n", + "kubedns-v1.16": "\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: kube-dns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: kube-dns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: kube-dns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: kube-dns-autoscaler\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n serviceAccountName: kube-dns-autoscaler\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.KubeDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=kube-dns-autoscaler\n - --target=Deployment/kube-dns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --logtostderr=true\n - --v=2\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: kube-dns-autoscaler\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:kube-dns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:kube-dns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: kube-dns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:kube-dns-autoscaler\n 
apiGroup: rbac.authorization.k8s.io\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\nspec:\n # replicas: not specified here:\n # 1. In order to make Addon Manager do not reconcile this replicas parameter.\n # 2. Default is 1.\n # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n affinity:\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n volumes:\n - name: kube-dns-config\n configMap:\n name: kube-dns\n optional: true\n containers:\n - name: kubedns\n image: {{.KubeDNSImage}}\n resources:\n # TODO: Set memory limits when we've profiled the container for large\n # clusters, then set request = limit to keep this container in\n # guaranteed class. 
Currently, this container falls into the\n # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n livenessProbe:\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n # we poll on pod startup for the Kubernetes master service and\n # only setup the /readiness HTTP server once that's available.\n initialDelaySeconds: 3\n timeoutSeconds: 5\n args:\n - --domain={{.ClusterDomain}}.\n - --dns-port=10053\n - --config-dir=/kube-dns-config\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n volumeMounts:\n - name: kube-dns-config\n mountPath: /kube-dns-config\n - name: dnsmasq\n image: {{.DNSMasqImage}}\n livenessProbe:\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --log-facility=-\n - --server=/{{.ClusterDomain}}/127.0.0.1#10053\n\t{{- if .ReverseCIDRs }}\n\t{{- range .ReverseCIDRs }}\n - --server=/{{.}}/127.0.0.1#10053\n\t{{- end }}\n\t{{- else }}\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/ip6.arpa/127.0.0.1#10053\n\t{{- end }}\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - name: kube-dns-config\n mountPath: /etc/k8s/dns/dnsmasq-nanny\n - name: sidecar\n image: {{.KubeDNSSidecarImage}}\n livenessProbe:\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n memory: 20Mi\n cpu: 10m\n dnsPolicy: Default # Don't use cluster DNS.\n serviceAccountName: kube-dns\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"KubeDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: kube-dns\n namespace: kube-system\ndata:\n{{- if .UpstreamNameservers }}\n upstreamNameservers: |\n [{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf \"%q\" .}}{{end}}]\n{{- end }}\n{{- if .StubDomains }}\n stubDomains: |\n {{ GetKubednsStubDomains .StubDomains }}\n{{- end }}", + "kubedns-v1.8": "\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: kube-dns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: kube-dns-autoscaler\nspec:\n 
template:\n metadata:\n labels:\n k8s-app: kube-dns-autoscaler\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n serviceAccountName: kube-dns-autoscaler\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.KubeDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=kube-dns-autoscaler\n - --target=Deployment/kube-dns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1}}\n{{end}}\n - --logtostderr=true\n - --v=2\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: kube-dns-autoscaler\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:kube-dns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:kube-dns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: kube-dns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:kube-dns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\nspec:\n # replicas: not specified here:\n # 1. In order to make Addon Manager do not reconcile this replicas parameter.\n # 2. Default is 1.\n # 3. 
Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n affinity:\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n volumes:\n - name: kube-dns-config\n configMap:\n name: kube-dns\n optional: true\n containers:\n - name: kubedns\n image: {{.KubeDNSImage}}\n resources:\n # TODO: Set memory limits when we've profiled the container for large\n # clusters, then set request = limit to keep this container in\n # guaranteed class. Currently, this container falls into the\n # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n livenessProbe:\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n # we poll on pod startup for the Kubernetes master service and\n # only setup the /readiness HTTP server once that's available.\n initialDelaySeconds: 3\n timeoutSeconds: 5\n args:\n - --domain={{.ClusterDomain}}.\n - --dns-port=10053\n - --config-dir=/kube-dns-config\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n volumeMounts:\n - name: kube-dns-config\n mountPath: /kube-dns-config\n - name: dnsmasq\n image: {{.DNSMasqImage}}\n livenessProbe:\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --log-facility=-\n - --server=/{{.ClusterDomain}}/127.0.0.1#10053\n\t{{- if .ReverseCIDRs }}\n\t{{- range .ReverseCIDRs }}\n - --server=/{{.}}/127.0.0.1#10053\n\t{{- end }}\n\t{{- else }}\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/ip6.arpa/127.0.0.1#10053\n\t{{- end }}\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - name: kube-dns-config\n mountPath: /etc/k8s/dns/dnsmasq-nanny\n - name: sidecar\n image: {{.KubeDNSSidecarImage}}\n livenessProbe:\n httpGet:\n 
path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n memory: 20Mi\n cpu: 10m\n dnsPolicy: Default # Don't use cluster DNS.\n serviceAccountName: kube-dns\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"KubeDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: kube-dns\n namespace: kube-system\ndata:\n{{- if .UpstreamNameservers }}\n upstreamNameservers: |\n [{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf \"%q\" .}}{{end}}]\n{{- end }}\n{{- if .StubDomains }}\n stubDomains: |\n {{ GetKubednsStubDomains .StubDomains }}\n{{- end }}", + "metricsserver-v1.8": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: metrics-server:system:auth-delegator\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n name: metrics-server\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: metrics-server-auth-reader\n namespace: kube-system\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n name: metrics-server\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: system:metrics-server\nrules:\n- apiGroups:\n - \"\"\n resources:\n - pods\n - nodes\n - nodes/stats\n - namespaces\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"extensions\"\n resources:\n - deployments\n verbs:\n - get\n - list\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: system:metrics-server\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:metrics-server\nsubjects:\n- kind: ServiceAccount\n name: metrics-server\n namespace: kube-system\n{{- end }}\n---\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n name: v1beta1.metrics.k8s.io\nspec:\n service:\n name: metrics-server\n namespace: kube-system\n group: metrics.k8s.io\n version: v1beta1\n insecureSkipTLSVerify: true\n groupPriorityMinimum: 100\n versionPriority: 100\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: metrics-server\n namespace: kube-system\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: metrics-server\n namespace: kube-system\n labels:\n k8s-app: metrics-server\nspec:\n{{if .Replicas}}\n replicas: {{.Replicas}}\n{{end}}\n selector:\n matchLabels:\n k8s-app: metrics-server\n{{if .UpdateStrategy}}\n strategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n template:\n metadata:\n name: metrics-server\n labels:\n k8s-app: metrics-server\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n 
nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n serviceAccountName: metrics-server\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: metrics-server\n image: {{ .MetricsServerImage }}\n imagePullPolicy: Always\n command:\n - /metrics-server\n {{- if eq .Version \"v0.3\" }}\n - --kubelet-insecure-tls\n - --kubelet-preferred-address-types=InternalIP\n - --logtostderr\n {{- else }}\n - --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true\u0026kubeletPort=10250\u0026useServiceAccount=true\u0026insecure=true\n {{- end }}\n {{ range $k,$v := .Options }}\n - --{{ $k }}={{ $v }}\n {{ end }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: metrics-server\n namespace: kube-system\n labels:\n kubernetes.io/name: \"Metrics-server\"\nspec:\n selector:\n k8s-app: metrics-server\n ports:\n - port: 443\n protocol: TCP\n targetPort: 443\n", + "nginxingress-v1.15": "\napiVersion: v1\nkind: Namespace\nmetadata:\n name: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: nginx-configuration\n namespace: ingress-nginx\n labels:\n app: ingress-nginx\ndata:\n{{ range $k,$v := .Options }}\n {{ $k }}: \"{{ $v }}\"\n{{ end }}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: tcp-services\n namespace: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: udp-services\n namespace: ingress-nginx\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: nginx-ingress-clusterrole\nrules:\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n - endpoints\n - nodes\n - pods\n - secrets\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - services\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"extensions\"\n - \"networking.k8s.io\"\n resources:\n - ingresses\n - daemonsets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n - apiGroups:\n - \"extensions\"\n - \"networking.k8s.io\"\n resources:\n - ingresses/status\n verbs:\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: nginx-ingress-role\n namespace: ingress-nginx\nrules:\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n - pods\n - secrets\n - namespaces\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n # Defaults to \"\u003celection-id\u003e-\u003cingress-class\u003e\"\n # Here: \"\u003cingress-controller-leader\u003e-\u003cnginx\u003e\"\n # This has to be adapted if you change either parameter\n # when launching the nginx-ingress-controller.\n - \"ingress-controller-leader-nginx\"\n verbs:\n - get\n - update\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - create\n - apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: nginx-ingress-role-nisa-binding\n namespace: ingress-nginx\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: 
nginx-ingress-role\nsubjects:\n - kind: ServiceAccount\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: nginx-ingress-clusterrole-nisa-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: nginx-ingress-clusterrole\nsubjects:\n - kind: ServiceAccount\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n{{ end }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: nginx-ingress-controller\n namespace: ingress-nginx\nspec:\n selector:\n matchLabels:\n app: ingress-nginx\n{{if .UpdateStrategy}}\n updateStrategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n template:\n metadata:\n labels:\n app: ingress-nginx\n annotations:\n prometheus.io/port: '10254'\n prometheus.io/scrape: 'true'\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n hostNetwork: true\n {{if .DNSPolicy}}\n dnsPolicy: {{.DNSPolicy}}\n {{end}}\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: nginx-ingress-serviceaccount\n {{ end }}\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n {{- if ne .AlpineImage \"\"}}\n initContainers:\n - command:\n - sh\n - -c\n - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range=\"1024 65535\"\n image: {{.AlpineImage}}\n imagePullPolicy: IfNotPresent\n name: sysctl\n securityContext:\n privileged: true\n {{- end }}\n containers:\n - name: nginx-ingress-controller\n image: {{.IngressImage}}\n args:\n - /nginx-ingress-controller\n - --default-backend-service=$(POD_NAMESPACE)/default-http-backend\n - --configmap=$(POD_NAMESPACE)/nginx-configuration\n - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services\n - --udp-services-configmap=$(POD_NAMESPACE)/udp-services\n - --annotations-prefix=nginx.ingress.kubernetes.io\n {{ range $k, $v := .ExtraArgs }}\n - --{{ $k }}{{if ne $v \"\" }}={{ $v }}{{end}}\n {{ end }}\n {{- if eq .AlpineImage \"\"}}\n securityContext:\n capabilities:\n drop:\n - ALL\n add:\n - NET_BIND_SERVICE\n runAsUser: 33\n {{- end }}\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n{{if .ExtraEnvs}}\n{{ toYaml .ExtraEnvs | indent 12}}\n{{end}}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 10254\n scheme: HTTP\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n{{if .ExtraVolumeMounts}}\n volumeMounts:\n{{ toYaml .ExtraVolumeMounts | indent 12}}\n{{end}}\n{{if .ExtraVolumes}}\n volumes:\n{{ toYaml .ExtraVolumes | indent 8}}\n{{end}}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: default-http-backend\n labels:\n app: default-http-backend\n namespace: ingress-nginx\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: default-http-backend\n template:\n metadata:\n labels:\n app: 
default-http-backend\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n terminationGracePeriodSeconds: 60\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: default-http-backend\n # Any image is permissable as long as:\n # 1. It serves a 404 page at /\n # 2. It serves 200 on a /healthz endpoint\n image: {{.IngressBackend}}\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 30\n timeoutSeconds: 5\n ports:\n - containerPort: 8080\n resources:\n limits:\n cpu: 10m\n memory: 20Mi\n requests:\n cpu: 10m\n memory: 20Mi\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: default-http-backend\n namespace: ingress-nginx\n labels:\n app: default-http-backend\nspec:\n ports:\n - port: 80\n targetPort: 8080\n selector:\n app: default-http-backend\n", + "nginxingress-v1.8": "\napiVersion: v1\nkind: Namespace\nmetadata:\n name: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: nginx-configuration\n namespace: ingress-nginx\n labels:\n app: ingress-nginx\ndata:\n{{ range $k,$v := .Options }}\n {{ $k }}: \"{{ $v }}\"\n{{ end }}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: tcp-services\n namespace: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: udp-services\n namespace: ingress-nginx\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: nginx-ingress-clusterrole\nrules:\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n - endpoints\n - nodes\n - pods\n - secrets\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - services\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"extensions\"\n resources:\n - ingresses\n - daemonsets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n - apiGroups:\n - \"extensions\"\n resources:\n - ingresses/status\n verbs:\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: nginx-ingress-role\n namespace: ingress-nginx\nrules:\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n - pods\n - secrets\n - namespaces\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n # Defaults to \"\u003celection-id\u003e-\u003cingress-class\u003e\"\n # Here: \"\u003cingress-controller-leader\u003e-\u003cnginx\u003e\"\n # This has to be adapted if you change either parameter\n # when launching the nginx-ingress-controller.\n - \"ingress-controller-leader-nginx\"\n verbs:\n - get\n - update\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - create\n - apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: nginx-ingress-role-nisa-binding\n namespace: ingress-nginx\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: nginx-ingress-role\nsubjects:\n - kind: ServiceAccount\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n---\napiVersion: 
rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: nginx-ingress-clusterrole-nisa-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: nginx-ingress-clusterrole\nsubjects:\n - kind: ServiceAccount\n name: nginx-ingress-serviceaccount\n namespace: ingress-nginx\n{{ end }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: nginx-ingress-controller\n namespace: ingress-nginx\nspec:\n selector:\n matchLabels:\n app: ingress-nginx\n{{if .UpdateStrategy}}\n updateStrategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n template:\n metadata:\n labels:\n app: ingress-nginx\n annotations:\n prometheus.io/port: '10254'\n prometheus.io/scrape: 'true'\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n hostNetwork: true\n {{if .DNSPolicy}}\n dnsPolicy: {{.DNSPolicy}}\n {{end}}\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: nginx-ingress-serviceaccount\n {{ end }}\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n {{- if ne .AlpineImage \"\"}}\n initContainers:\n - command:\n - sh\n - -c\n - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range=\"1024 65535\"\n image: {{.AlpineImage}}\n imagePullPolicy: IfNotPresent\n name: sysctl\n securityContext:\n privileged: true\n {{- end }}\n containers:\n - name: nginx-ingress-controller\n image: {{.IngressImage}}\n args:\n - /nginx-ingress-controller\n - --default-backend-service=$(POD_NAMESPACE)/default-http-backend\n - --configmap=$(POD_NAMESPACE)/nginx-configuration\n - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services\n - --udp-services-configmap=$(POD_NAMESPACE)/udp-services\n - --annotations-prefix=nginx.ingress.kubernetes.io\n {{ range $k, $v := .ExtraArgs }}\n - --{{ $k }}{{if ne $v \"\" }}={{ $v }}{{end}}\n {{ end }}\n {{- if eq .AlpineImage \"\"}}\n securityContext:\n capabilities:\n drop:\n - ALL\n add:\n - NET_BIND_SERVICE\n runAsUser: 33\n {{- end }}\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n{{if .ExtraEnvs}}\n{{ toYaml .ExtraEnvs | indent 12}}\n{{end}}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: /healthz\n port: 10254\n scheme: HTTP\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n{{if .ExtraVolumeMounts}}\n volumeMounts:\n{{ toYaml .ExtraVolumeMounts | indent 12}}\n{{end}}\n{{if .ExtraVolumes}}\n volumes:\n{{ toYaml .ExtraVolumes | indent 8}}\n{{end}}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: default-http-backend\n labels:\n app: default-http-backend\n namespace: ingress-nginx\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: default-http-backend\n template:\n metadata:\n labels:\n app: default-http-backend\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n 
- key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n - key: node-role.kubernetes.io/worker\n operator: Exists\n terminationGracePeriodSeconds: 60\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: default-http-backend\n # Any image is permissable as long as:\n # 1. It serves a 404 page at /\n # 2. It serves 200 on a /healthz endpoint\n image: {{.IngressBackend}}\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 30\n timeoutSeconds: 5\n ports:\n - containerPort: 8080\n resources:\n limits:\n cpu: 10m\n memory: 20Mi\n requests:\n cpu: 10m\n memory: 20Mi\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: default-http-backend\n namespace: ingress-nginx\n labels:\n app: default-http-backend\nspec:\n ports:\n - port: 80\n targetPort: 8080\n selector:\n app: default-http-backend\n", + "weave-v1.16": "\n---\n# This ConfigMap can be used to configure a self-hosted Weave Net installation.\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n name: weave-net\n namespace: kube-system\n - apiVersion: apps/v1\n kind: DaemonSet\n metadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\n spec:\n selector:\n matchLabels:\n name: weave-net\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n scheduler.alpha.kubernetes.io/tolerations: \u003e-\n [{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"master\",\"effect\":\"NoSchedule\"}]\n labels:\n name: weave-net\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n containers:\n - name: weave\n command:\n - /home/weave/launch.sh\n env:\n - name: HOSTNAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: IPALLOC_RANGE\n value: \"{{.ClusterCIDR}}\"\n {{- if .WeavePassword}}\n - name: WEAVE_PASSWORD\n value: \"{{.WeavePassword}}\"\n {{- end}}\n {{- if .MTU }}\n {{- if ne .MTU 0 }}\n - name: WEAVE_MTU\n value: \"{{.MTU}}\"\n {{- end }}\n {{- end }}\n image: {{.Image}}\n readinessProbe:\n httpGet:\n host: 127.0.0.1\n path: /status\n port: 6784\n initialDelaySeconds: 30\n resources:\n requests:\n cpu: 10m\n securityContext:\n privileged: true\n volumeMounts:\n - name: weavedb\n mountPath: /weavedb\n - name: cni-bin\n mountPath: /host/opt\n - name: cni-bin2\n mountPath: /host/home\n - name: cni-conf\n mountPath: /host/etc\n - name: dbus\n mountPath: /host/var/lib/dbus\n - name: lib-modules\n mountPath: /lib/modules\n - name: xtables-lock\n mountPath: /run/xtables.lock\n - name: weave-npc\n env:\n - name: HOSTNAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n image: {{.CNIImage}}\n resources:\n requests:\n cpu: 10m\n securityContext:\n privileged: true\n volumeMounts:\n - name: xtables-lock\n mountPath: /run/xtables.lock\n - name: weave-plugins\n command:\n - /opt/rke-tools/weave-plugins-cni.sh\n image: {{.WeaveLoopbackImage}}\n securityContext:\n privileged: true\n volumeMounts:\n - name: cni-bin\n mountPath: /opt\n hostNetwork: true\n hostPID: true\n restartPolicy: Always\n securityContext:\n seLinuxOptions: {}\n serviceAccountName: weave-net\n tolerations:\n - operator: Exists\n effect: NoSchedule\n - 
operator: Exists\n effect: NoExecute\n volumes:\n - name: weavedb\n hostPath:\n path: /var/lib/weave\n - name: cni-bin\n hostPath:\n path: /opt\n - name: cni-bin2\n hostPath:\n path: /home\n - name: cni-conf\n hostPath:\n path: /etc\n - name: dbus\n hostPath:\n path: /var/lib/dbus\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 8}}\n{{end}}\n type: RollingUpdate\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: weave-net\n labels:\n name: weave-net\nrules:\n - apiGroups:\n - ''\n resources:\n - pods\n - namespaces\n - nodes\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - ''\n resources:\n - nodes/status\n verbs:\n - patch\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: weave-net\n labels:\n name: weave-net\nroleRef:\n kind: ClusterRole\n name: weave-net\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n - kind: ServiceAccount\n name: weave-net\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\nrules:\n - apiGroups:\n - ''\n resourceNames:\n - weave-net\n resources:\n - configmaps\n verbs:\n - get\n - update\n - apiGroups:\n - ''\n resources:\n - configmaps\n verbs:\n - create\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\nroleRef:\n kind: Role\n name: weave-net\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n - kind: ServiceAccount\n name: weave-net\n namespace: kube-system\n{{- end}}\n", + "weave-v1.8": "\n---\n# This ConfigMap can be used to configure a self-hosted Weave Net installation.\napiVersion: v1\nkind: List\nitems:\n - apiVersion: v1\n kind: ServiceAccount\n metadata:\n name: weave-net\n namespace: kube-system\n - apiVersion: extensions/v1beta1\n kind: DaemonSet\n metadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\n spec:\n template:\n metadata:\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n scheduler.alpha.kubernetes.io/tolerations: \u003e-\n [{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"master\",\"effect\":\"NoSchedule\"}]\n labels:\n name: weave-net\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n containers:\n - name: weave\n command:\n - /home/weave/launch.sh\n env:\n - name: HOSTNAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: IPALLOC_RANGE\n value: \"{{.ClusterCIDR}}\"\n {{- if .WeavePassword}}\n - name: WEAVE_PASSWORD\n value: \"{{.WeavePassword}}\"\n {{- end}}\n {{- if .MTU }}\n {{- if ne .MTU 0 }}\n - name: WEAVE_MTU\n value: \"{{.MTU}}\"\n {{- end }}\n {{- end }}\n image: {{.Image}}\n readinessProbe:\n httpGet:\n host: 127.0.0.1\n path: /status\n port: 6784\n 
initialDelaySeconds: 30\n resources:\n requests:\n cpu: 10m\n securityContext:\n privileged: true\n volumeMounts:\n - name: weavedb\n mountPath: /weavedb\n - name: cni-bin\n mountPath: /host/opt\n - name: cni-bin2\n mountPath: /host/home\n - name: cni-conf\n mountPath: /host/etc\n - name: dbus\n mountPath: /host/var/lib/dbus\n - name: lib-modules\n mountPath: /lib/modules\n - name: xtables-lock\n mountPath: /run/xtables.lock\n - name: weave-npc\n env:\n - name: HOSTNAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n image: {{.CNIImage}}\n resources:\n requests:\n cpu: 10m\n securityContext:\n privileged: true\n volumeMounts:\n - name: xtables-lock\n mountPath: /run/xtables.lock\n - name: weave-plugins\n command:\n - /opt/rke-tools/weave-plugins-cni.sh\n image: {{.WeaveLoopbackImage}}\n securityContext:\n privileged: true\n volumeMounts:\n - name: cni-bin\n mountPath: /opt\n hostNetwork: true\n hostPID: true\n restartPolicy: Always\n securityContext:\n seLinuxOptions: {}\n serviceAccountName: weave-net\n tolerations:\n - operator: Exists\n effect: NoSchedule\n - operator: Exists\n effect: NoExecute\n volumes:\n - name: weavedb\n hostPath:\n path: /var/lib/weave\n - name: cni-bin\n hostPath:\n path: /opt\n - name: cni-bin2\n hostPath:\n path: /home\n - name: cni-conf\n hostPath:\n path: /etc\n - name: dbus\n hostPath:\n path: /var/lib/dbus\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 8}}\n{{end}}\n type: RollingUpdate\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: weave-net\n labels:\n name: weave-net\nrules:\n - apiGroups:\n - ''\n resources:\n - pods\n - namespaces\n - nodes\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - ''\n resources:\n - nodes/status\n verbs:\n - patch\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: weave-net\n labels:\n name: weave-net\nroleRef:\n kind: ClusterRole\n name: weave-net\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n - kind: ServiceAccount\n name: weave-net\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\nrules:\n - apiGroups:\n - ''\n resourceNames:\n - weave-net\n resources:\n - configmaps\n verbs:\n - get\n - update\n - apiGroups:\n - ''\n resources:\n - configmaps\n verbs:\n - create\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: weave-net\n labels:\n name: weave-net\n namespace: kube-system\nroleRef:\n kind: Role\n name: weave-net\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n - kind: ServiceAccount\n name: weave-net\n namespace: kube-system\n{{- end}}\n" + }, + "weave": { + "\u003e=1.16.0-alpha": "weave-v1.16", + "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "weave-v1.8" + } + }, + "K8sVersionInfo": { + "v1.10": { + "maxRKEVersion": "0.2.2", + "maxRancherVersion": "2.2" + }, + "v1.10.1-rancher1": { + "deprecateRKEVersion": "0.2.2", + "deprecateRancherVersion": "2.2" + }, + "v1.11": { + "maxRKEVersion": "0.2.2", + "maxRancherVersion": "2.2" + }, 
+ "v1.12": { + "maxRKEVersion": "0.2.2", + "maxRancherVersion": "2.2" + }, + "v1.13": { + "maxRKEVersion": "0.3.1", + "maxRancherVersion": "2.3.1" + }, + "v1.14": { + "maxRKEVersion": "1.0.0", + "maxRancherVersion": "2.3.3" + }, + "v1.15.5-rancher1-1": { + "maxRKEVersion": "0.2.8", + "maxRancherVersion": "2.2.9" + }, + "v1.8": { + "maxRKEVersion": "0.2.2", + "maxRancherVersion": "2.2" + }, + "v1.8.10-rancher1-1": { + "deprecateRKEVersion": "0.2.2", + "deprecateRancherVersion": "2.2" + }, + "v1.8.11-rancher1": { + "deprecateRKEVersion": "0.2.2", + "deprecateRancherVersion": "2.2" + }, + "v1.9": { + "maxRKEVersion": "0.2.2", + "maxRancherVersion": "2.2" + }, + "v1.9.7-rancher1": { + "deprecateRKEVersion": "0.2.2", + "deprecateRancherVersion": "2.2" + } + }, + "RancherDefaultK8sVersions": { + "2.3": "v1.17.x", + "2.3.0": "v1.15.x", + "2.3.1": "v1.15.x", + "2.3.2": "v1.15.x", + "2.3.3": "v1.16.x", + "default": "v1.17.x" + }, + "RKEDefaultK8sVersions": { + "0.3": "v1.16.3-rancher1-1", + "default": "v1.17.3-rancher1-1" + }, + "K8sVersionDockerInfo": { + "1.10": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.11": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.12": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.13": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.14": [ + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.15": [ + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.16": [ + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.17": [ + "1.13.x", + "17.03.x", + "17.06.x", + "17.09.x", + "18.06.x", + "18.09.x", + "19.03.x" + ], + "1.8": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x" + ], + "1.9": [ + "1.11.x", + "1.12.x", + "1.13.x", + "17.03.x", + "18.06.x", + "18.09.x", + "19.03.x" + ] + }, + "K8sVersionWindowsServiceOptions": { + "v1.15": { + "etcd": null, + "kubeapi": null, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki", + "cgroups-per-qos": "false", + "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin", + "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d", + "enforce-node-allocatable": "''", + "event-qps": "0", + "feature-gates": "HyperVContainer=true,WindowsGMSA=true", + "image-pull-progress-deadline": "30m", + "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "''", + "streaming-connection-idle-timeout": "30m", + "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "enable-dsr": "false", + "feature-gates": "WinOverlay=true", + "healthz-bind-address": "127.0.0.1", + "proxy-mode": "kernelspace", + "v": "2" + }, + "kubeController": null, + "scheduler": null + }, + "v1.16": 
{ + "etcd": null, + "kubeapi": null, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki", + "cgroups-per-qos": "false", + "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin", + "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d", + "enforce-node-allocatable": "''", + "event-qps": "0", + "feature-gates": "HyperVContainer=true,WindowsGMSA=true", + "image-pull-progress-deadline": "30m", + "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "''", + "streaming-connection-idle-timeout": "30m", + "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "enable-dsr": "false", + "feature-gates": "WinOverlay=true", + "healthz-bind-address": "127.0.0.1", + "proxy-mode": "kernelspace", + "v": "2" + }, + "kubeController": null, + "scheduler": null + }, + "v1.17": { + "etcd": null, + "kubeapi": null, + "kubelet": { + "address": "0.0.0.0", + "anonymous-auth": "false", + "authentication-token-webhook": "true", + "authorization-mode": "Webhook", + "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki", + "cgroups-per-qos": "false", + "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin", + "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d", + "enforce-node-allocatable": "''", + "event-qps": "0", + "feature-gates": "HyperVContainer=true,WindowsGMSA=true", + "image-pull-progress-deadline": "30m", + "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi", + "make-iptables-util-chains": "true", + "network-plugin": "cni", + "read-only-port": "0", + "resolv-conf": "''", + "streaming-connection-idle-timeout": "30m", + "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi", + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "v": "2", + "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins" + }, + "kubeproxy": { + "enable-dsr": "false", + "feature-gates": "WinOverlay=true", + "healthz-bind-address": "127.0.0.1", + "proxy-mode": "kernelspace", + "v": "2" + }, + "kubeController": null, + "scheduler": null + } + }, + "CisConfigParams": { + "default": { + "benchmarkVersion": "rke-cis-1.4" + }, + "v1.15": { + "benchmarkVersion": "rke-cis-1.4" + } + }, + "CisBenchmarkVersionInfo": { + "cis-1.4": { + "minKubernetesVersion": "1.13" + }, + "cis-1.5": { + "minKubernetesVersion": "1.15" + }, + "rke-cis-1.4": { + "minKubernetesVersion": "1.13" + }, + "rke-cis-1.5": { + "minKubernetesVersion": "1.15" + } + } +} \ No newline at end of file diff --git a/main.go b/main.go index 07371d119..e40363b7f 100644 --- a/main.go +++ b/main.go @@ -1,3 +1,5 @@ +//go:generate go run ./codegen/codegen.go +//go:generate go run ./vendor/github.com/go-bindata/go-bindata/go-bindata -o ./data/bindata.go -ignore bindata.go -pkg data -modtime 1557785965 -mode 0644 ./data/ package main import ( @@ -5,10 +7,9 @@ import ( "os" "regexp" - 
"github.com/rancher/rke/metadata" - "github.com/mattn/go-colorable" "github.com/rancher/rke/cmd" + "github.com/rancher/rke/metadata" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) diff --git a/metadata/metadata.go b/metadata/metadata.go index 9755a4cff..84f50228a 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -2,11 +2,23 @@ package metadata import ( "context" + "fmt" + "io/ioutil" + "net/http" + "os" "strings" + "time" + + "github.com/sirupsen/logrus" mVersion "github.com/mcuadros/go-version" - "github.com/rancher/kontainer-driver-metadata/rke" - "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/rancher/rke/data" + v3 "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/rancher/types/kdm" +) + +const ( + RancherMetadataURLEnv = "RANCHER_METADATA_URL" ) var ( @@ -20,34 +32,76 @@ var ( K8sBadVersions = map[string]bool{} K8sVersionToWindowsServiceOptions map[string]v3.KubernetesServicesOptions + + c = http.Client{ + Timeout: time.Second * 30, + } ) func InitMetadata(ctx context.Context) error { - initK8sRKESystemImages() - initAddonTemplates() - initServiceOptions() - initDockerOptions() + data, err := loadData() + if err != nil { + return fmt.Errorf("failed to load data.json, error: %v", err) + } + initK8sRKESystemImages(data) + initAddonTemplates(data) + initServiceOptions(data) + initDockerOptions(data) return nil } +// this method loads metadata from three place, if RANCHER_METADATA_URL is provided then load data from specified location. Otherwise load data from bindata. +func loadData() (kdm.Data, error) { + var b []byte + var err error + u := os.Getenv(RancherMetadataURLEnv) + if u != "" { + logrus.Debugf("Loading data.json from %s, timestamp: %s", u, time.Now().Format(time.RFC822)) + b, err = readFile(u) + if err != nil { + return kdm.Data{}, err + } + } else { + logrus.Debugf("Logging data.json from local source, timestamp: %s", time.Now().Format(time.RFC822)) + b, err = data.Asset("data/data.json") + if err != nil { + return kdm.Data{}, err + } + } + logrus.Debugf("data.json content: %v", string(b)) + return kdm.FromData(b) +} + +func readFile(file string) ([]byte, error) { + if strings.HasPrefix(file, "http") { + resp, err := c.Get(file) + if err != nil { + return nil, err + } + defer resp.Body.Close() + return ioutil.ReadAll(resp.Body) + } + return ioutil.ReadFile(file) +} + const RKEVersionDev = "v0.2.3" -func initAddonTemplates() { - K8sVersionToTemplates = rke.DriverData.K8sVersionedTemplates +func initAddonTemplates(data kdm.Data) { + K8sVersionToTemplates = data.K8sVersionedTemplates } -func initServiceOptions() { - K8sVersionToServiceOptions = interface{}(rke.DriverData.K8sVersionServiceOptions).(map[string]v3.KubernetesServicesOptions) - K8sVersionToWindowsServiceOptions = rke.DriverData.K8sVersionWindowsServiceOptions +func initServiceOptions(data kdm.Data) { + K8sVersionToServiceOptions = interface{}(data.K8sVersionServiceOptions).(map[string]v3.KubernetesServicesOptions) + K8sVersionToWindowsServiceOptions = data.K8sVersionWindowsServiceOptions } -func initDockerOptions() { - K8sVersionToDockerVersions = rke.DriverData.K8sVersionDockerInfo +func initDockerOptions(data kdm.Data) { + K8sVersionToDockerVersions = data.K8sVersionDockerInfo } -func initK8sRKESystemImages() { +func initK8sRKESystemImages(data kdm.Data) { K8sVersionToRKESystemImages = map[string]v3.RKESystemImages{} - rkeData := rke.DriverData + rkeData := data // non released versions if RKEVersion == "" { RKEVersion = RKEVersionDev diff --git 
a/scripts/validate b/scripts/validate index 17f8372eb..05a34bf1b 100755 --- a/scripts/validate +++ b/scripts/validate @@ -5,7 +5,7 @@ cd $(dirname $0)/.. echo Running validation -PACKAGES=". $(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor|bin)' | sed -e 's!^!./!' -e 's!$!/...!')" +PACKAGES=". $(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|.trash-cache|vendor|bin|data)' | sed -e 's!^!./!' -e 's!$!/...!')" echo Running: go vet go vet ${PACKAGES} diff --git a/templates/templates.go b/templates/templates.go index 5c3900255..571765999 100644 --- a/templates/templates.go +++ b/templates/templates.go @@ -10,9 +10,9 @@ import ( "github.com/Masterminds/sprig/v3" "github.com/blang/semver" "github.com/ghodss/yaml" - "github.com/rancher/kontainer-driver-metadata/rke/templates" "github.com/rancher/norman/types/convert" "github.com/rancher/rke/metadata" + "github.com/rancher/types/kdm" "github.com/sirupsen/logrus" ) @@ -69,7 +69,7 @@ func getTemplate(templateName, k8sVersion string) (string, error) { continue } if testRange(toMatch) { - return metadata.K8sVersionToTemplates[templates.TemplateKeys][versionData[k]], nil + return metadata.K8sVersionToTemplates[kdm.TemplateKeys][versionData[k]], nil } } return "", fmt.Errorf("no %s template found for k8sVersion %s", templateName, k8sVersion) diff --git a/vendor.go b/vendor.go new file mode 100644 index 000000000..f3eba9e23 --- /dev/null +++ b/vendor.go @@ -0,0 +1,9 @@ +// +build vendor + +package main + +import ( + _ "github.com/go-bindata/go-bindata/go-bindata" +) + +func main() {}