Skip to content
This repository has been archived by the owner on Sep 30, 2020. It is now read-only.

Commit

Permalink
Merge pull request #1856 from jorge07/0.12.x-flartcar
Browse files Browse the repository at this point in the history
[v0.12.x] Flatcar
  • Loading branch information
dominicgunn authored May 27, 2020
2 parents 0ea4367 + a5d83c3 commit d268c5a
Show file tree
Hide file tree
Showing 21 changed files with 171 additions and 88 deletions.
56 changes: 56 additions & 0 deletions cmd/ami.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package cmd

import (
"fmt"

"github.com/kubernetes-incubator/kube-aws/core/root"
"github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
"github.com/kubernetes-incubator/kube-aws/logger"
"github.com/spf13/cobra"
)

var (
	// cmdAmi is the `kube-aws ami` subcommand. It compares the AMI ID
	// recorded in cluster.yaml against the latest Flatcar release for the
	// cluster's region and channel (see runCmdAmi).
	cmdAmi = &cobra.Command{
		Use:          "ami",
		Short:        "Compare AMIID of cluster.yaml VS the last release",
		Long:         ``,
		RunE:         runCmdAmi,
		// Errors are reported by RunE; suppress cobra's usage dump on failure.
		SilenceUsage: true,
	}
)

// init registers the `ami` subcommand on the root command at package load.
func init() {
	RootCmd.AddCommand(cmdAmi)

}

// runCmdAmi implements the `ami` subcommand. It loads the cluster definition
// from configPath, fetches the latest Flatcar AMI for the cluster's region
// and release channel, and prints a diff-style message when the AMI ID in
// cluster.yaml is out of date. It returns an error if the cluster config
// cannot be read or the AMI registry lookup fails.
func runCmdAmi(_ *cobra.Command, _ []string) error {
	opts := root.NewOptions(true, true)
	cluster, err := root.ClusterFromFile(configPath, opts, false)
	if err != nil {
		return fmt.Errorf("failed to read cluster config: %v", err)
	}

	region := cluster.ControlPlane().Region.Name
	channel := string(cluster.ControlPlane().ReleaseChannel)

	amiID, err := amiregistry.GetAMI(region, cluster.ControlPlane().ReleaseChannel)
	if err != nil {
		// Propagate the underlying error instead of discarding it, keep the
		// message lowercase per Go convention, and match the wording used by
		// cmd/init.go for the same failure.
		return fmt.Errorf("cannot retrieve Flatcar AMI for region %s, channel %s: %v", region, channel, err)
	}

	if cluster.ControlPlane().AmiId == amiID {
		logger.Infof("AmiID up to date")
		return nil
	}

	// diffMsg mimics a unified-diff hunk: "-" is the configured AMI,
	// "+" is the latest release. (Renamed from successMsg — it reports a
	// mismatch, not a success.)
	diffMsg := `
The Flatcar AmiId for region %s and release channel %s is different than the one in cluster definition.
Cluster.yaml:
- amiId: %s
+ amiId: %s
`
	logger.Infof(diffMsg, region, channel, cluster.ControlPlane().AmiId, amiID)
	return nil
}
15 changes: 10 additions & 5 deletions cmd/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,10 @@ import (
"fmt"

"github.com/kubernetes-incubator/kube-aws/core/root/config"
"github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
"github.com/kubernetes-incubator/kube-aws/filegen"
"github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
"github.com/kubernetes-incubator/kube-aws/logger"
"github.com/kubernetes-incubator/kube-aws/model"
"github.com/spf13/cobra"
)

Expand All @@ -20,7 +21,8 @@ var (
SilenceUsage: true,
}

initOpts = config.InitialConfig{}
initOpts = config.InitialConfig{}
releaseChannel = ""
)

const (
Expand All @@ -37,7 +39,8 @@ func init() {
cmdInit.Flags().StringVar(&initOpts.AvailabilityZone, "availability-zone", "", "The AWS availability-zone to deploy to")
cmdInit.Flags().StringVar(&initOpts.KeyName, "key-name", "", "The AWS key-pair for ssh access to nodes")
cmdInit.Flags().StringVar(&initOpts.KMSKeyARN, "kms-key-arn", "", "The ARN of the AWS KMS key for encrypting TLS assets")
cmdInit.Flags().StringVar(&initOpts.AmiId, "ami-id", "", "The AMI ID of CoreOS. Last CoreOS Stable Channel selected by default if empty")
cmdInit.Flags().StringVar(&initOpts.AmiId, "ami-id", "", "The AMI ID of Flatcar. Last Flatcar Stable Channel selected by default if empty")
cmdInit.Flags().StringVar(&releaseChannel, "release-channel", defaultReleaseChannel, "Flatcar release channel for AMI")
cmdInit.Flags().BoolVar(&initOpts.NoRecordSet, "no-record-set", false, "Instruct kube-aws to not manage Route53 record sets for your K8S API endpoints")
}

Expand All @@ -55,12 +58,14 @@ func runCmdInit(_ *cobra.Command, _ []string) error {

if initOpts.AmiId == "" {
amiID, err := amiregistry.GetAMI(initOpts.Region.Name, defaultReleaseChannel)
initOpts.AmiId = amiID
if err != nil {
return fmt.Errorf("cannot retrieve CoreOS AMI for region %s, channel %s", initOpts.Region.Name, defaultReleaseChannel)
return fmt.Errorf("cannot retrieve Flatcar AMI for region %s, channel %s", initOpts.Region.Name, defaultReleaseChannel)
}
initOpts.AmiId = amiID
}

initOpts.ReleaseChannel = model.ReleaseChannel(defaultReleaseChannel)

if !initOpts.NoRecordSet && initOpts.HostedZoneID == "" {
return errors.New("missing required flags: either --hosted-zone-id or --no-record-set is required")
}
Expand Down
1 change: 1 addition & 0 deletions core/controlplane/cluster/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ func defaultConfigValues(t *testing.T, configYaml string) string {
defaultYaml := `
externalDNSName: test.staging.core-os.net
keyName: test-key-name
releaseChannel: stable
s3URI: s3://mybucket/mydir
region: us-west-1
clusterName: test-cluster-name
Expand Down
16 changes: 4 additions & 12 deletions core/controlplane/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ import (
"github.com/go-yaml/yaml"
"github.com/kubernetes-incubator/kube-aws/cfnresource"
"github.com/kubernetes-incubator/kube-aws/cfnstack"
"github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
"github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
"github.com/kubernetes-incubator/kube-aws/gzipcompressor"
"github.com/kubernetes-incubator/kube-aws/logger"
"github.com/kubernetes-incubator/kube-aws/model"
Expand Down Expand Up @@ -170,11 +170,11 @@ func NewDefaultCluster() *Cluster {
DeploymentSettings: DeploymentSettings{
ClusterName: "kubernetes",
VPCCIDR: "10.0.0.0/16",
ReleaseChannel: "stable",
KubeAWSVersion: "UNKNOWN",
K8sVer: k8sVer,
ContainerRuntime: "docker",
Subnets: []model.Subnet{},
ReleaseChannel: model.DefaultReleaseChannel(),
EIPAllocationIDs: []string{},
Experimental: experimental,
Kubelet: kubelet,
Expand Down Expand Up @@ -513,7 +513,7 @@ type DeploymentSettings struct {
KeyName string `yaml:"keyName,omitempty"`
Region model.Region `yaml:",inline"`
AvailabilityZone string `yaml:"availabilityZone,omitempty"`
ReleaseChannel string `yaml:"releaseChannel,omitempty"`
ReleaseChannel model.ReleaseChannel `yaml:"releaseChannel,omitempty"`
AmiId string `yaml:"amiId,omitempty"`
DeprecatedVPCID string `yaml:"vpcId,omitempty"`
VPC model.VPC `yaml:"vpc,omitempty"`
Expand Down Expand Up @@ -921,12 +921,6 @@ const (
internetGatewayLogicalName = "InternetGateway"
)

var supportedReleaseChannels = map[string]bool{
"alpha": true,
"beta": true,
"stable": true,
}

func (c DeploymentSettings) ApiServerLeaseEndpointReconciler() (bool, error) {
constraint, err := semver.NewConstraint(">= 1.9")
if err != nil {
Expand Down Expand Up @@ -1391,11 +1385,9 @@ type DeploymentValidationResult struct {
}

func (c DeploymentSettings) Validate() (*DeploymentValidationResult, error) {
releaseChannelSupported := supportedReleaseChannels[c.ReleaseChannel]
if !releaseChannelSupported {
if err := c.ReleaseChannel.IsValid(); err != nil {
return nil, fmt.Errorf("releaseChannel %s is not supported", c.ReleaseChannel)
}

if c.KeyName == "" && len(c.SSHAuthorizedKeys) == 0 {
return nil, errors.New("Either keyName or sshAuthorizedKeys must be set")
}
Expand Down
2 changes: 1 addition & 1 deletion core/controlplane/config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,7 @@ releaseChannel: non-existent #this release channel will never exist
t.Errorf("failed to parse config %s: %v", confBody, err)
continue
}
if c.ReleaseChannel != conf.channel {
if string(c.ReleaseChannel) != conf.channel {
t.Errorf(
"parsed release channel %s does not match config: %s",
c.ReleaseChannel,
Expand Down
2 changes: 1 addition & 1 deletion core/nodepool/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/kubernetes-incubator/kube-aws/cfnresource"
cfg "github.com/kubernetes-incubator/kube-aws/core/controlplane/config"
"github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
"github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
"github.com/kubernetes-incubator/kube-aws/logger"
"github.com/kubernetes-incubator/kube-aws/model"
"github.com/kubernetes-incubator/kube-aws/model/derived"
Expand Down
6 changes: 3 additions & 3 deletions core/nodepool/config/templates/cloud-config-worker
Original file line number Diff line number Diff line change
Expand Up @@ -1258,10 +1258,10 @@ write_files:
echo "Keeping container around after build: ${KEEP_CONTAINER}"
echo "Additional flags: ${EMERGE_SOURCES}"

# If we are on CoreOS by default build for the current CoreOS version
if [[ -f /etc/lsb-release && -f /etc/coreos/update.conf ]]; then
# If we are on flatcar by default build for the current flatcar version
if [[ -f /etc/lsb-release && -f /etc/flatcar/update.conf ]]; then
source /etc/lsb-release
source /etc/coreos/update.conf
source /etc/flatcar/update.conf

COREOS_TRACK_DEFAULT=$GROUP
COREOS_VERSION_DEFAULT=$DISTRIB_RELEASE
Expand Down
4 changes: 2 additions & 2 deletions core/root/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -776,7 +776,7 @@ func (c *clusterImpl) ValidateStack(opts ...OperationTargets) (string, error) {
func streamJournaldLogs(c *clusterImpl, q chan struct{}) error {
logger.Infof("Streaming filtered Journald logs for log group '%s'...\nNOTE: Due to high initial entropy, '.service' failures may occur during the early stages of booting.\n", c.controlPlane.ClusterName)
cwlSvc := cloudwatchlogs.New(c.session)
s := time.Now().Unix() * 1E3
s := time.Now().Unix() * 1e3
t := s
in := cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &c.controlPlane.ClusterName,
Expand All @@ -800,7 +800,7 @@ func streamJournaldLogs(c *clusterImpl, q chan struct{}) error {
ms[*event.Message] = *event.Timestamp
res := model.SystemdMessageResponse{}
json.Unmarshal([]byte(*event.Message), &res)
s := int(((*event.Timestamp) - t) / 1E3)
s := int(((*event.Timestamp) - t) / 1e3)
d := fmt.Sprintf("+%.2d:%.2d:%.2d", s/3600, (s/60)%60, s%60)
logger.Infof("%s\t%s: \"%s\"\n", d, res.Hostname, res.Message)
}
Expand Down
1 change: 1 addition & 0 deletions core/root/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ type InitialConfig struct {
KeyName string
NoRecordSet bool
Region model.Region
ReleaseChannel model.ReleaseChannel
S3URI string
}

Expand Down
11 changes: 5 additions & 6 deletions core/root/config/templates/cluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,16 @@ clusterName: {{.ClusterName}}
# The URI of the S3 bucket for the cluster
s3URI: {{.S3URI}}

# CoreOS release channel to use. Currently supported options: alpha, beta, stable
# Flatcar release channel to use. Currently supported options: alpha, beta, stable
# See https://www.flatcar-linux.org/releases/ for more information
#releaseChannel: stable
releaseChannel: stable

# The AMI ID of CoreOS.
# The AMI ID of Flatcar.
#
# To update this to the latest AMI run the following command with the appropriate region and channel then place the resulting ID here
# REGION=eu-west-1 && CHANNEL=stable && curl -s https://coreos.com/dist/aws/aws-$CHANNEL.json | jq -r ".\"$REGION\".hvm"
# To update this to the latest AMI, run the following command: kube-aws ami
amiId: "{{.AmiId}}"

# Container Linux has automatic updates https://coreos.com/os/docs/latest/update-strategies.html. This can be a risk in certain situations and this is why is disabled by default and you can enable it by setting this param to false.
# Flatcar has automatic updates https://docs.flatcar-linux.org/os/update-strategies/#disable-automatic-updates-daemon. This can be a risk in certain situations and this is why is disabled by default and you can enable it by setting this param to false.
disableContainerLinuxAutomaticUpdates: true

# Override the CloudFormation logical sub-stack names of control plane, etcd and/or network.
Expand Down
47 changes: 0 additions & 47 deletions coreos/amiregistry/amiregistry.go

This file was deleted.

2 changes: 1 addition & 1 deletion docs/cli-reference/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ Initialize the base configuration for a cluster ready for customization prior to

| Flag | Description | Default |
| -- | -- | -- |
| `ami-id` | The AMI ID of CoreOS Container Linux to deploy | The latest AMI for the Container Linux release channel specified in `cluster.yaml` |
| `ami-id` | The AMI ID of Flatcar Container Linux to deploy | The latest AMI for the Container Linux release channel specified in `cluster.yaml` |
| `availability-zone` | The AWS availability-zone to deploy to. Note, this can be changed to multi AZ in `cluster.yaml` | none |
| `cluster-name` | The name of this cluster. This will be the name of the cloudformation stack | none |
| `external-dns-name` | The hostname that will route to the api server | none |
Expand Down
10 changes: 5 additions & 5 deletions docs/getting-started/step-2-render.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ This is the second step of [running Kubernetes on AWS](README.md). Before we lau

### EC2 key pair

The keypair that will authenticate SSH access to your EC2 instances. The public half of this key pair will be configured on each CoreOS node.
The keypair that will authenticate SSH access to your EC2 instances. The public half of this key pair will be configured on each Flatcar node.

After creating a key pair, you will use the name you gave the keys to configure the cluster. Key pairs are only available to EC2 instances in the same region. More info in the [EC2 Keypair docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).

Expand Down Expand Up @@ -167,7 +167,7 @@ Each component certificate is only valid for 90 days, while the CA is valid for
If deploying a production Kubernetes cluster, consider establishing PKI independently of this tool first. [Read more below.][tls-note]

**Did everything render correctly?**
If you are familiar with CoreOS and the AWS platform, you may want to include some additional customizations or optional features. Read on below to explore more.
If you are familiar with Flatcar and the AWS platform, you may want to include some additional customizations or optional features. Read on below to explore more.

[Yes, ready to launch the cluster][getting-started-step-3]

Expand Down Expand Up @@ -196,15 +196,15 @@ You can now customize your cluster by editing asset files. Any changes to these
* `cloud-config-worker`
* `cloud-config-controller`

This directory contains the [cloud-init](https://github.com/coreos/coreos-cloudinit) cloud-config userdata files. The CoreOS operating system supports automated provisioning via cloud-config files, which describe the various files, scripts and systemd actions necessary to produce a working cluster machine. These files are templated with your cluster configuration parameters and embedded into the CloudFormation stack template.
This directory contains the [cloud-init](https://github.com/coreos/coreos-cloudinit) cloud-config userdata files. The Flatcar operating system supports automated provisioning via cloud-config files, which describe the various files, scripts and systemd actions necessary to produce a working cluster machine. These files are templated with your cluster configuration parameters and embedded into the CloudFormation stack template.

Some common customizations are:

- [mounting ephemeral disks][mount-disks]
- [allow pods to mount RDB][rdb] or [iSCSI volumes][iscsi]
- [allowing access to insecure container registries][insecure-registry]
- [use host DNS configuration instead of a public DNS server][host-dns]
- [changing your CoreOS auto-update settings][update]
- [changing your Flatcar auto-update settings][update]
<br/><br/>

* **stack-template.json**
Expand All @@ -230,7 +230,7 @@ You can now customize your cluster by editing asset files. Any changes to these

### Kubernetes Container Runtime

The kube-aws tool now optionally supports using rkt as the kubernetes container runtime. To configure rkt as the container runtime you must run with a CoreOS version >= `v1151.0.0` and configure the runtime flag.
The kube-aws tool now optionally supports using rkt as the kubernetes container runtime. To configure rkt as the container runtime you must run with a Flatcar version >= `v1151.0.0` and configure the runtime flag.

Edit the `cluster.yaml` file:

Expand Down
2 changes: 1 addition & 1 deletion docs/getting-started/step-4-update.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ some of your system pods will break (especially `kube-dns`). Deleting the said s

There is no solution for hosting an etcd cluster in a way that is easily updateable in this fashion- so updates are automatically masked for the etcd instances. This means that, after the cluster is created, nothing about the etcd ec2 instances is allowed to be updated.

Fortunately, CoreOS update engine will take care of keeping the members of the etcd cluster up-to-date, but you as the operator will not be able to modify them after creation via the update mechanism.
Fortunately, Flatcar update engine will take care of keeping the members of the etcd cluster up-to-date, but you as the operator will not be able to modify them after creation via the update mechanism.

In the (near) future, etcd will be hosted on Kubernetes and this problem will no longer be relevant. Rather than concocting overly complex band-aid, we've decided to "punt" on this issue of the time being.
Expand Down
2 changes: 1 addition & 1 deletion docs/tutorials/quick-start.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Quick Start

Get started with kube-aws and deploy a fully-functional Kubernetes cluster running on CoreOS Container Linux using AWS CloudFormation.
Get started with kube-aws and deploy a fully-functional Kubernetes cluster running on Flatcar Container Linux using AWS CloudFormation.

After completing this guide, you will be able to deploy applications to Kubernetes on AWS and interact with the Kubernetes API using the `kubectl` CLI tool.

Expand Down
4 changes: 2 additions & 2 deletions etcdadm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@ save it in S3
* `etcdadm restore` restores the etcd member running on the same node as etcdadm from a snapshot saved in S3
* `etcdadm check` runs health checks against all the members in an etcd cluster so that `kubeadm reconfigure` updates the etcd member accordingly to the situation
* `etcdadm reconfigure` reconfigures the etcd member on the same node as etcdadm so that it survives:
* `N/2` or less permanently failed members, by automatically removing a permanently failed member and then re-add it as a brand-new member with empty data according to ["Replace a failed etcd member on CoreOS Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#replace-a-failed-etcd-member-on-coreos-container-linux)
* `(N/2)+1` or more permanently failed members, by automatically initiating a new cluster, from a snapshot if it exists, according to ["etcd disaster recovery on CoreOS Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#etcd-disaster-recovery-on-coreos-container-linux)
* `N/2` or less permanently failed members, by automatically removing a permanently failed member and then re-add it as a brand-new member with empty data according to ["Replace a failed etcd member on Flatcar Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#replace-a-failed-etcd-member-on-coreos-container-linux)
* `(N/2)+1` or more permanently failed members, by automatically initiating a new cluster, from a snapshot if it exists, according to ["etcd disaster recovery on Flatcar Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#etcd-disaster-recovery-on-coreos-container-linux)
* `etcdadm replace` is used to manually recover from an etcd member from a permanent failure. It resets the etcd member running on the same node as etcdadm by:
1. clearing the contents of the etcd data dir
2. removing and then re-adding the etcd member by running `etcdctl member remove` and then `etcdctl memer add`
Expand Down
Loading

0 comments on commit d268c5a

Please sign in to comment.